// Dataset columns: Code (string, 131 to 28.2k chars) | Unit Test (string,
// 40 to 32.1k chars) | __index_level_0__ (int64, 0 to 2.63k)

// Code:
#ifndef THIRD_PARTY_CEL_CPP_BASE_KIND_H_
#define THIRD_PARTY_CEL_CPP_BASE_KIND_H_
#include "common/kind.h"
#include "common/type_kind.h"
#include "common/value_kind.h"
#endif
#include "common/kind.h"
#include "absl/strings/string_view.h"
namespace cel {
absl::string_view KindToString(Kind kind) {
switch (kind) {
case Kind::kNullType:
return "null_type";
case Kind::kDyn:
return "dyn";
case Kind::kAny:
return "any";
case Kind::kType:
return "type";
case Kind::kTypeParam:
return "type_param";
case Kind::kFunction:
return "function";
case Kind::kBool:
return "bool";
case Kind::kInt:
return "int";
case Kind::kUint:
return "uint";
case Kind::kDouble:
return "double";
case Kind::kString:
return "string";
case Kind::kBytes:
return "bytes";
case Kind::kDuration:
return "duration";
case Kind::kTimestamp:
return "timestamp";
case Kind::kList:
return "list";
case Kind::kMap:
return "map";
case Kind::kStruct:
return "struct";
case Kind::kUnknown:
return "*unknown*";
case Kind::kOpaque:
return "*opaque*";
case Kind::kBoolWrapper:
return "google.protobuf.BoolValue";
case Kind::kIntWrapper:
return "google.protobuf.Int64Value";
case Kind::kUintWrapper:
return "google.protobuf.UInt64Value";
case Kind::kDoubleWrapper:
return "google.protobuf.DoubleValue";
case Kind::kStringWrapper:
return "google.protobuf.StringValue";
case Kind::kBytesWrapper:
return "google.protobuf.BytesValue";
default:
return "*error*";
}
}
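// Illustrative usage (a sketch, not part of the original source): every
// listed kind maps to a fixed string, and anything unlisted falls through to
// the default arm.
//
//   absl::string_view s = KindToString(Kind::kDouble);            // "double"
//   absl::string_view e = KindToString(static_cast<Kind>(9999));  // "*error*"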
}  // namespace cel

// Unit Test:
#include "common/kind.h"
#include <limits>
#include <type_traits>
#include "common/type_kind.h"
#include "common/value_kind.h"
#include "internal/testing.h"
namespace cel {
namespace {
static_assert(std::is_same_v<std::underlying_type_t<TypeKind>,
std::underlying_type_t<ValueKind>>,
"TypeKind and ValueKind must have the same underlying type");
TEST(Kind, ToString) {
EXPECT_EQ(KindToString(Kind::kError), "*error*");
EXPECT_EQ(KindToString(Kind::kNullType), "null_type");
EXPECT_EQ(KindToString(Kind::kDyn), "dyn");
EXPECT_EQ(KindToString(Kind::kAny), "any");
EXPECT_EQ(KindToString(Kind::kType), "type");
EXPECT_EQ(KindToString(Kind::kBool), "bool");
EXPECT_EQ(KindToString(Kind::kInt), "int");
EXPECT_EQ(KindToString(Kind::kUint), "uint");
EXPECT_EQ(KindToString(Kind::kDouble), "double");
EXPECT_EQ(KindToString(Kind::kString), "string");
EXPECT_EQ(KindToString(Kind::kBytes), "bytes");
EXPECT_EQ(KindToString(Kind::kDuration), "duration");
EXPECT_EQ(KindToString(Kind::kTimestamp), "timestamp");
EXPECT_EQ(KindToString(Kind::kList), "list");
EXPECT_EQ(KindToString(Kind::kMap), "map");
EXPECT_EQ(KindToString(Kind::kStruct), "struct");
EXPECT_EQ(KindToString(Kind::kUnknown), "*unknown*");
EXPECT_EQ(KindToString(Kind::kOpaque), "*opaque*");
EXPECT_EQ(KindToString(Kind::kBoolWrapper), "google.protobuf.BoolValue");
EXPECT_EQ(KindToString(Kind::kIntWrapper), "google.protobuf.Int64Value");
EXPECT_EQ(KindToString(Kind::kUintWrapper), "google.protobuf.UInt64Value");
EXPECT_EQ(KindToString(Kind::kDoubleWrapper), "google.protobuf.DoubleValue");
EXPECT_EQ(KindToString(Kind::kStringWrapper), "google.protobuf.StringValue");
EXPECT_EQ(KindToString(Kind::kBytesWrapper), "google.protobuf.BytesValue");
EXPECT_EQ(KindToString(static_cast<Kind>(std::numeric_limits<int>::max())),
"*error*");
}
TEST(Kind, TypeKindRoundtrip) {
EXPECT_EQ(TypeKindToKind(KindToTypeKind(Kind::kBool)), Kind::kBool);
}
TEST(Kind, ValueKindRoundtrip) {
EXPECT_EQ(ValueKindToKind(KindToValueKind(Kind::kBool)), Kind::kBool);
}
TEST(Kind, IsTypeKind) {
EXPECT_TRUE(KindIsTypeKind(Kind::kBool));
EXPECT_TRUE(KindIsTypeKind(Kind::kAny));
EXPECT_TRUE(KindIsTypeKind(Kind::kDyn));
}
TEST(Kind, IsValueKind) {
EXPECT_TRUE(KindIsValueKind(Kind::kBool));
EXPECT_FALSE(KindIsValueKind(Kind::kAny));
EXPECT_FALSE(KindIsValueKind(Kind::kDyn));
}
TEST(Kind, Equality) {
EXPECT_EQ(Kind::kBool, TypeKind::kBool);
EXPECT_EQ(TypeKind::kBool, Kind::kBool);
EXPECT_EQ(Kind::kBool, ValueKind::kBool);
EXPECT_EQ(ValueKind::kBool, Kind::kBool);
EXPECT_NE(Kind::kBool, TypeKind::kInt);
EXPECT_NE(TypeKind::kInt, Kind::kBool);
EXPECT_NE(Kind::kBool, ValueKind::kInt);
EXPECT_NE(ValueKind::kInt, Kind::kBool);
}
TEST(TypeKind, ToString) {
EXPECT_EQ(TypeKindToString(TypeKind::kBool), KindToString(Kind::kBool));
}
TEST(ValueKind, ToString) {
EXPECT_EQ(ValueKindToString(ValueKind::kBool), KindToString(Kind::kBool));
}
}
}  // namespace cel

// [__index_level_0__: 0]

// Code:
#ifndef THIRD_PARTY_CEL_CPP_COMMON_VALUE_FACTORY_H_
#define THIRD_PARTY_CEL_CPP_COMMON_VALUE_FACTORY_H_
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "common/json.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/unknown.h"
#include "common/value.h"
namespace cel {
namespace common_internal {
class PiecewiseValueManager;
}
class ValueFactory : public virtual TypeFactory {
public:
Value CreateValueFromJson(Json json);
ListValue CreateListValueFromJsonArray(JsonArray json);
MapValue CreateMapValueFromJsonObject(JsonObject json);
ListValue CreateZeroListValue(ListTypeView type);
MapValue CreateZeroMapValue(MapTypeView type);
OptionalValue CreateZeroOptionalValue(OptionalTypeView type);
ListValueView GetZeroDynListValue();
MapValueView GetZeroDynDynMapValue();
MapValueView GetZeroStringDynMapValue();
OptionalValueView GetZeroDynOptionalValue();
NullValue GetNullValue() { return NullValue{}; }
ErrorValue CreateErrorValue(absl::Status status) {
return ErrorValue{std::move(status)};
}
BoolValue CreateBoolValue(bool value) { return BoolValue{value}; }
IntValue CreateIntValue(int64_t value) { return IntValue{value}; }
UintValue CreateUintValue(uint64_t value) { return UintValue{value}; }
DoubleValue CreateDoubleValue(double value) { return DoubleValue{value}; }
BytesValue GetBytesValue() { return BytesValue(); }
absl::StatusOr<BytesValue> CreateBytesValue(const char* value) {
return CreateBytesValue(absl::string_view(value));
}
absl::StatusOr<BytesValue> CreateBytesValue(absl::string_view value) {
return CreateBytesValue(std::string(value));
}
absl::StatusOr<BytesValue> CreateBytesValue(std::string value);
absl::StatusOr<BytesValue> CreateBytesValue(absl::Cord value) {
return BytesValue(std::move(value));
}
template <typename Releaser>
absl::StatusOr<BytesValue> CreateBytesValue(absl::string_view value,
Releaser&& releaser) {
return BytesValue(
absl::MakeCordFromExternal(value, std::forward<Releaser>(releaser)));
}
StringValue GetStringValue() { return StringValue(); }
absl::StatusOr<StringValue> CreateStringValue(const char* value) {
return CreateStringValue(absl::string_view(value));
}
absl::StatusOr<StringValue> CreateStringValue(absl::string_view value) {
return CreateStringValue(std::string(value));
}
absl::StatusOr<StringValue> CreateStringValue(std::string value);
absl::StatusOr<StringValue> CreateStringValue(absl::Cord value);
template <typename Releaser>
absl::StatusOr<StringValue> CreateStringValue(absl::string_view value,
Releaser&& releaser) {
return StringValue(
absl::MakeCordFromExternal(value, std::forward<Releaser>(releaser)));
}
StringValue CreateUncheckedStringValue(const char* value) {
return CreateUncheckedStringValue(absl::string_view(value));
}
StringValue CreateUncheckedStringValue(absl::string_view value) {
return CreateUncheckedStringValue(std::string(value));
}
StringValue CreateUncheckedStringValue(std::string value);
StringValue CreateUncheckedStringValue(absl::Cord value) {
return StringValue(std::move(value));
}
template <typename Releaser>
StringValue CreateUncheckedStringValue(absl::string_view value,
Releaser&& releaser) {
return StringValue(
absl::MakeCordFromExternal(value, std::forward<Releaser>(releaser)));
}
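// The Releaser overloads above wrap caller-owned bytes without copying; the
// releaser runs once the underlying cord no longer references the memory.
// A minimal sketch (`factory` stands in for any ValueFactory):
//
//   static constexpr absl::string_view kText = "hello";
//   StringValue v = factory.CreateUncheckedStringValue(kText, [] {});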
absl::StatusOr<DurationValue> CreateDurationValue(absl::Duration value);
DurationValue CreateUncheckedDurationValue(absl::Duration value) {
return DurationValue{value};
}
absl::StatusOr<TimestampValue> CreateTimestampValue(absl::Time value);
TimestampValue CreateUncheckedTimestampValue(absl::Time value) {
return TimestampValue{value};
}
TypeValue CreateTypeValue(TypeView type) { return TypeValue{Type(type)}; }
UnknownValue CreateUnknownValue() {
return CreateUnknownValue(AttributeSet(), FunctionResultSet());
}
UnknownValue CreateUnknownValue(AttributeSet attribute_set) {
return CreateUnknownValue(std::move(attribute_set), FunctionResultSet());
}
UnknownValue CreateUnknownValue(FunctionResultSet function_result_set) {
return CreateUnknownValue(AttributeSet(), std::move(function_result_set));
}
UnknownValue CreateUnknownValue(AttributeSet attribute_set,
FunctionResultSet function_result_set) {
return UnknownValue{
Unknown{std::move(attribute_set), std::move(function_result_set)}};
}
protected:
friend class common_internal::PiecewiseValueManager;
virtual ListValue CreateZeroListValueImpl(ListTypeView type) = 0;
virtual MapValue CreateZeroMapValueImpl(MapTypeView type) = 0;
virtual OptionalValue CreateZeroOptionalValueImpl(OptionalTypeView type) = 0;
};
}
#endif
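// Illustrative ValueFactory usage (a sketch; `factory` stands in for any
// concrete implementation, typically reached through a ValueManager):
//
//   Value n = factory.GetNullValue();
//   Value i = factory.CreateIntValue(42);
//   absl::StatusOr<StringValue> s = factory.CreateStringValue("hello");
//
// CreateStringValue validates UTF-8 and can fail; CreateUncheckedStringValue
// skips the check when the caller already guarantees well-formed input.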
#include "common/value_factory.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/casting.h"
#include "common/internal/arena_string.h"
#include "common/internal/reference_count.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/value_cache.h"
#include "internal/status_macros.h"
#include "internal/time.h"
#include "internal/utf8.h"
namespace cel {
namespace {
using common_internal::ProcessLocalValueCache;
void JsonToValue(const Json& json, ValueFactory& value_factory, Value& result) {
absl::visit(
absl::Overload(
[&result](JsonNull) { result = NullValue(); },
[&result](JsonBool value) { result = BoolValue(value); },
[&result](JsonNumber value) { result = DoubleValue(value); },
[&result](const JsonString& value) { result = StringValue(value); },
[&value_factory, &result](const JsonArray& value) {
result = value_factory.CreateListValueFromJsonArray(value);
},
[&value_factory, &result](const JsonObject& value) {
result = value_factory.CreateMapValueFromJsonObject(value);
}),
json);
}
void JsonDebugString(const Json& json, std::string& out);
void JsonArrayDebugString(const JsonArray& json, std::string& out) {
out.push_back('[');
auto element = json.begin();
if (element != json.end()) {
JsonDebugString(*element, out);
++element;
for (; element != json.end(); ++element) {
out.append(", ");
JsonDebugString(*element, out);
}
}
out.push_back(']');
}
void JsonObjectEntryDebugString(const JsonString& key, const Json& value,
std::string& out) {
out.append(StringValueView(key).DebugString());
out.append(": ");
JsonDebugString(value, out);
}
void JsonObjectDebugString(const JsonObject& json, std::string& out) {
std::vector<JsonString> keys;
keys.reserve(json.size());
for (const auto& entry : json) {
keys.push_back(entry.first);
}
std::stable_sort(keys.begin(), keys.end());
out.push_back('{');
auto key = keys.begin();
if (key != keys.end()) {
JsonObjectEntryDebugString(*key, json.find(*key)->second, out);
++key;
for (; key != keys.end(); ++key) {
out.append(", ");
JsonObjectEntryDebugString(*key, json.find(*key)->second, out);
}
}
out.push_back('}');
}
void JsonDebugString(const Json& json, std::string& out) {
absl::visit(absl::Overload(
[&out](JsonNull) -> void {
out.append(NullValueView().DebugString());
},
[&out](JsonBool value) -> void {
out.append(BoolValueView(value).DebugString());
},
[&out](JsonNumber value) -> void {
out.append(DoubleValueView(value).DebugString());
},
[&out](const JsonString& value) -> void {
out.append(StringValueView(value).DebugString());
},
[&out](const JsonArray& value) -> void {
JsonArrayDebugString(value, out);
},
[&out](const JsonObject& value) -> void {
JsonObjectDebugString(value, out);
}),
json);
}
class JsonListValue final : public ParsedListValueInterface {
public:
explicit JsonListValue(JsonArray array) : array_(std::move(array)) {}
std::string DebugString() const override {
std::string out;
JsonArrayDebugString(array_, out);
return out;
}
bool IsEmpty() const override { return array_.empty(); }
size_t Size() const override { return array_.size(); }
absl::StatusOr<JsonArray> ConvertToJsonArray(
AnyToJsonConverter&) const override {
return array_;
}
private:
Type GetTypeImpl(TypeManager& type_manager) const override {
return ListType(type_manager.GetDynListType());
}
absl::Status GetImpl(ValueManager& value_manager, size_t index,
Value& result) const override {
JsonToValue(array_[index], value_manager, result);
return absl::OkStatus();
}
NativeTypeId GetNativeTypeId() const noexcept override {
return NativeTypeId::For<JsonListValue>();
}
const JsonArray array_;
};
class JsonMapValueKeyIterator final : public ValueIterator {
public:
explicit JsonMapValueKeyIterator(
const JsonObject& object ABSL_ATTRIBUTE_LIFETIME_BOUND)
: begin_(object.begin()), end_(object.end()) {}
bool HasNext() override { return begin_ != end_; }
absl::Status Next(ValueManager&, Value& result) override {
if (ABSL_PREDICT_FALSE(begin_ == end_)) {
return absl::FailedPreconditionError(
"ValueIterator::Next() called when "
"ValueIterator::HasNext() returns false");
}
const auto& key = begin_->first;
++begin_;
result = StringValue(key);
return absl::OkStatus();
}
private:
typename JsonObject::const_iterator begin_;
typename JsonObject::const_iterator end_;
};
class JsonMapValue final : public ParsedMapValueInterface {
public:
explicit JsonMapValue(JsonObject object) : object_(std::move(object)) {}
std::string DebugString() const override {
std::string out;
JsonObjectDebugString(object_, out);
return out;
}
bool IsEmpty() const override { return object_.empty(); }
size_t Size() const override { return object_.size(); }
absl::Status ListKeys(ValueManager& value_manager,
ListValue& result) const override {
JsonArrayBuilder keys;
keys.reserve(object_.size());
for (const auto& entry : object_) {
keys.push_back(entry.first);
}
result = ParsedListValue(
value_manager.GetMemoryManager().MakeShared<JsonListValue>(
std::move(keys).Build()));
return absl::OkStatus();
}
absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> NewIterator(
ValueManager&) const override {
return std::make_unique<JsonMapValueKeyIterator>(object_);
}
absl::StatusOr<JsonObject> ConvertToJsonObject(
AnyToJsonConverter&) const override {
return object_;
}
private:
absl::StatusOr<bool> FindImpl(ValueManager& value_manager, ValueView key,
Value& result) const override {
return Cast<StringValueView>(key).NativeValue(absl::Overload(
[this, &value_manager, &result](absl::string_view value) -> bool {
if (auto entry = object_.find(value); entry != object_.end()) {
JsonToValue(entry->second, value_manager, result);
return true;
}
return false;
},
[this, &value_manager, &result](const absl::Cord& value) -> bool {
if (auto entry = object_.find(value); entry != object_.end()) {
JsonToValue(entry->second, value_manager, result);
return true;
}
return false;
}));
}
absl::StatusOr<bool> HasImpl(ValueManager&, ValueView key) const override {
return Cast<StringValueView>(key).NativeValue(absl::Overload(
[this](absl::string_view value) -> bool {
return object_.contains(value);
},
[this](const absl::Cord& value) -> bool {
return object_.contains(value);
}));
}
Type GetTypeImpl(TypeManager& type_manager) const override {
return MapType(type_manager.GetStringDynMapType());
}
NativeTypeId GetNativeTypeId() const noexcept override {
return NativeTypeId::For<JsonMapValue>();
}
const JsonObject object_;
};
}
Value ValueFactory::CreateValueFromJson(Json json) {
return absl::visit(
absl::Overload(
[](JsonNull) -> Value { return NullValue(); },
[](JsonBool value) -> Value { return BoolValue(value); },
[](JsonNumber value) -> Value { return DoubleValue(value); },
[](const JsonString& value) -> Value { return StringValue(value); },
[this](JsonArray value) -> Value {
return CreateListValueFromJsonArray(std::move(value));
},
[this](JsonObject value) -> Value {
return CreateMapValueFromJsonObject(std::move(value));
}),
std::move(json));
}
ListValue ValueFactory::CreateListValueFromJsonArray(JsonArray json) {
if (json.empty()) {
return ListValue(GetZeroDynListValue());
}
return ParsedListValue(
GetMemoryManager().MakeShared<JsonListValue>(std::move(json)));
}
MapValue ValueFactory::CreateMapValueFromJsonObject(JsonObject json) {
if (json.empty()) {
return MapValue(GetZeroStringDynMapValue());
}
return ParsedMapValue(
GetMemoryManager().MakeShared<JsonMapValue>(std::move(json)));
}
ListValue ValueFactory::CreateZeroListValue(ListTypeView type) {
if (auto list_value = ProcessLocalValueCache::Get()->GetEmptyListValue(type);
list_value.has_value()) {
return ListValue(*list_value);
}
return CreateZeroListValueImpl(type);
}
MapValue ValueFactory::CreateZeroMapValue(MapTypeView type) {
if (auto map_value = ProcessLocalValueCache::Get()->GetEmptyMapValue(type);
map_value.has_value()) {
return MapValue(*map_value);
}
return CreateZeroMapValueImpl(type);
}
OptionalValue ValueFactory::CreateZeroOptionalValue(OptionalTypeView type) {
if (auto optional_value =
ProcessLocalValueCache::Get()->GetEmptyOptionalValue(type);
optional_value.has_value()) {
return OptionalValue(*optional_value);
}
return CreateZeroOptionalValueImpl(type);
}
ListValueView ValueFactory::GetZeroDynListValue() {
return ProcessLocalValueCache::Get()->GetEmptyDynListValue();
}
MapValueView ValueFactory::GetZeroDynDynMapValue() {
return ProcessLocalValueCache::Get()->GetEmptyDynDynMapValue();
}
MapValueView ValueFactory::GetZeroStringDynMapValue() {
return ProcessLocalValueCache::Get()->GetEmptyStringDynMapValue();
}
OptionalValueView ValueFactory::GetZeroDynOptionalValue() {
return ProcessLocalValueCache::Get()->GetEmptyDynOptionalValue();
}
namespace {
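// Stores a std::string inside raw, suitably aligned storage so the string can
// be destroyed explicitly from Finalize() when the reference count drops to
// zero. std::launder is required to access the placement-new'd object through
// the character buffer.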
class ReferenceCountedString final : public common_internal::ReferenceCounted {
public:
static const ReferenceCountedString* New(std::string&& string) {
return new ReferenceCountedString(std::move(string));
}
const char* data() const {
return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->data();
}
size_t size() const {
return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->size();
}
private:
explicit ReferenceCountedString(std::string&& robbed) : ReferenceCounted() {
::new (static_cast<void*>(&string_[0])) std::string(std::move(robbed));
}
void Finalize() noexcept override {
std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->~basic_string();
}
alignas(std::string) char string_[sizeof(std::string)];
};
}
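// Destructor callback for pooling memory managers: arenas release memory in
// bulk without running destructors, so non-trivially-destructible objects
// such as std::string are registered via OwnCustomDestructor below.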
static void StringDestructor(void* string) {
static_cast<std::string*>(string)->~basic_string();
}
absl::StatusOr<BytesValue> ValueFactory::CreateBytesValue(std::string value) {
auto memory_manager = GetMemoryManager();
switch (memory_manager.memory_management()) {
case MemoryManagement::kPooling: {
auto* string = ::new (
memory_manager.Allocate(sizeof(std::string), alignof(std::string)))
std::string(std::move(value));
memory_manager.OwnCustomDestructor(string, &StringDestructor);
return BytesValue{common_internal::ArenaString(*string)};
}
case MemoryManagement::kReferenceCounting: {
auto* refcount = ReferenceCountedString::New(std::move(value));
auto bytes_value = BytesValue{common_internal::SharedByteString(
refcount, absl::string_view(refcount->data(), refcount->size()))};
common_internal::StrongUnref(*refcount);
return bytes_value;
}
}
}
StringValue ValueFactory::CreateUncheckedStringValue(std::string value) {
auto memory_manager = GetMemoryManager();
switch (memory_manager.memory_management()) {
case MemoryManagement::kPooling: {
auto* string = ::new (
memory_manager.Allocate(sizeof(std::string), alignof(std::string)))
std::string(std::move(value));
memory_manager.OwnCustomDestructor(string, &StringDestructor);
return StringValue{common_internal::ArenaString(*string)};
}
case MemoryManagement::kReferenceCounting: {
auto* refcount = ReferenceCountedString::New(std::move(value));
auto string_value = StringValue{common_internal::SharedByteString(
refcount, absl::string_view(refcount->data(), refcount->size()))};
common_internal::StrongUnref(*refcount);
return string_value;
}
}
}
absl::StatusOr<StringValue> ValueFactory::CreateStringValue(std::string value) {
auto [count, ok] = internal::Utf8Validate(value);
if (ABSL_PREDICT_FALSE(!ok)) {
return absl::InvalidArgumentError(
"Illegal byte sequence in UTF-8 encoded string");
}
return CreateUncheckedStringValue(std::move(value));
}
absl::StatusOr<StringValue> ValueFactory::CreateStringValue(absl::Cord value) {
auto [count, ok] = internal::Utf8Validate(value);
if (ABSL_PREDICT_FALSE(!ok)) {
return absl::InvalidArgumentError(
"Illegal byte sequence in UTF-8 encoded string");
}
return StringValue(std::move(value));
}
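// Illustrative sketch of the validation behavior above (`factory` is any
// ValueFactory); "\xC3\x28" is a malformed UTF-8 sequence.
//
//   auto ok = factory.CreateStringValue("hello");      // holds a StringValue
//   auto bad = factory.CreateStringValue("\xC3\x28");  // InvalidArgumentError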
absl::StatusOr<DurationValue> ValueFactory::CreateDurationValue(
absl::Duration value) {
CEL_RETURN_IF_ERROR(internal::ValidateDuration(value));
return DurationValue{value};
}
absl::StatusOr<TimestampValue> ValueFactory::CreateTimestampValue(
absl::Time value) {
CEL_RETURN_IF_ERROR(internal::ValidateTimestamp(value));
return TimestampValue{value};
}
}  // namespace cel

// Unit Test:
#include "common/value_factory.h"
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_reflector.h"
#include "common/types/type_cache.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/testing.h"
namespace cel {
namespace {
using common_internal::ProcessLocalTypeCache;
using testing::TestParamInfo;
using testing::TestWithParam;
using testing::UnorderedElementsAreArray;
using cel::internal::IsOkAndHolds;
enum class ThreadSafety {
kCompatible,
kSafe,
};
std::ostream& operator<<(std::ostream& out, ThreadSafety thread_safety) {
  switch (thread_safety) {
    case ThreadSafety::kCompatible:
      return out << "THREAD_COMPATIBLE";
    case ThreadSafety::kSafe:
      return out << "THREAD_SAFE";
  }
}
class ValueFactoryTest
: public common_internal::ThreadCompatibleMemoryTest<ThreadSafety> {
public:
void SetUp() override {
switch (thread_safety()) {
case ThreadSafety::kCompatible:
value_manager_ = NewThreadCompatibleValueManager(
memory_manager(),
NewThreadCompatibleTypeReflector(memory_manager()));
break;
case ThreadSafety::kSafe:
value_manager_ = NewThreadSafeValueManager(
memory_manager(), NewThreadSafeTypeReflector(memory_manager()));
break;
}
}
void TearDown() override { Finish(); }
void Finish() {
value_manager_.reset();
ThreadCompatibleMemoryTest::Finish();
}
TypeFactory& type_factory() const { return value_manager(); }
TypeManager& type_manager() const { return value_manager(); }
ValueFactory& value_factory() const { return value_manager(); }
ValueManager& value_manager() const { return **value_manager_; }
ThreadSafety thread_safety() const { return std::get<1>(GetParam()); }
static std::string ToString(
TestParamInfo<std::tuple<MemoryManagement, ThreadSafety>> param) {
std::ostringstream out;
out << std::get<0>(param.param) << "_" << std::get<1>(param.param);
return out.str();
}
private:
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ValueFactoryTest, JsonValueNull) {
auto value = value_factory().CreateValueFromJson(kJsonNull);
EXPECT_TRUE(InstanceOf<NullValue>(value));
}
TEST_P(ValueFactoryTest, JsonValueBool) {
auto value = value_factory().CreateValueFromJson(true);
ASSERT_TRUE(InstanceOf<BoolValue>(value));
EXPECT_TRUE(Cast<BoolValue>(value).NativeValue());
}
TEST_P(ValueFactoryTest, JsonValueNumber) {
auto value = value_factory().CreateValueFromJson(1.0);
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
EXPECT_EQ(Cast<DoubleValue>(value).NativeValue(), 1.0);
}
TEST_P(ValueFactoryTest, JsonValueString) {
auto value = value_factory().CreateValueFromJson(absl::Cord("foo"));
ASSERT_TRUE(InstanceOf<StringValue>(value));
EXPECT_EQ(Cast<StringValue>(value).NativeString(), "foo");
}
JsonObject NewJsonObjectForTesting(bool with_array = true,
bool with_nested_object = true);
JsonArray NewJsonArrayForTesting(bool with_nested_array = true,
bool with_object = true) {
JsonArrayBuilder builder;
builder.push_back(kJsonNull);
builder.push_back(true);
builder.push_back(1.0);
builder.push_back(absl::Cord("foo"));
if (with_nested_array) {
builder.push_back(NewJsonArrayForTesting(false, false));
}
if (with_object) {
builder.push_back(NewJsonObjectForTesting(false, false));
}
return std::move(builder).Build();
}
JsonObject NewJsonObjectForTesting(bool with_array, bool with_nested_object) {
JsonObjectBuilder builder;
builder.insert_or_assign(absl::Cord("a"), kJsonNull);
builder.insert_or_assign(absl::Cord("b"), true);
builder.insert_or_assign(absl::Cord("c"), 1.0);
builder.insert_or_assign(absl::Cord("d"), absl::Cord("foo"));
if (with_array) {
builder.insert_or_assign(absl::Cord("e"),
NewJsonArrayForTesting(false, false));
}
if (with_nested_object) {
builder.insert_or_assign(absl::Cord("f"),
NewJsonObjectForTesting(false, false));
}
return std::move(builder).Build();
}
TEST_P(ValueFactoryTest, JsonValueArray) {
auto value = value_factory().CreateValueFromJson(NewJsonArrayForTesting());
ASSERT_TRUE(InstanceOf<ListValue>(value));
EXPECT_EQ(TypeView(value.GetType(type_manager())),
type_factory().GetDynListType());
auto list_value = Cast<ListValue>(value);
EXPECT_THAT(list_value.IsEmpty(), IsOkAndHolds(false));
EXPECT_THAT(list_value.Size(), IsOkAndHolds(6));
EXPECT_EQ(list_value.DebugString(),
"[null, true, 1.0, \"foo\", [null, true, 1.0, \"foo\"], {\"a\": "
"null, \"b\": true, \"c\": 1.0, \"d\": \"foo\"}]");
ASSERT_OK_AND_ASSIGN(auto element, list_value.Get(value_manager(), 0));
EXPECT_TRUE(InstanceOf<NullValue>(element));
}
TEST_P(ValueFactoryTest, JsonValueObject) {
auto value = value_factory().CreateValueFromJson(NewJsonObjectForTesting());
ASSERT_TRUE(InstanceOf<MapValue>(value));
EXPECT_EQ(TypeView(value.GetType(type_manager())),
type_factory().GetStringDynMapType());
auto map_value = Cast<MapValue>(value);
EXPECT_THAT(map_value.IsEmpty(), IsOkAndHolds(false));
EXPECT_THAT(map_value.Size(), IsOkAndHolds(6));
EXPECT_EQ(map_value.DebugString(),
"{\"a\": null, \"b\": true, \"c\": 1.0, \"d\": \"foo\", \"e\": "
"[null, true, 1.0, \"foo\"], \"f\": {\"a\": null, \"b\": true, "
"\"c\": 1.0, \"d\": \"foo\"}}");
ASSERT_OK_AND_ASSIGN(auto keys, map_value.ListKeys(value_manager()));
EXPECT_THAT(keys.Size(), IsOkAndHolds(6));
ASSERT_OK_AND_ASSIGN(auto keys_iterator,
map_value.NewIterator(value_manager()));
std::vector<StringValue> string_keys;
while (keys_iterator->HasNext()) {
ASSERT_OK_AND_ASSIGN(auto key, keys_iterator->Next(value_manager()));
string_keys.push_back(StringValue(Cast<StringValue>(key)));
}
EXPECT_THAT(string_keys, UnorderedElementsAreArray(
{StringValueView("a"), StringValueView("b"),
StringValueView("c"), StringValueView("d"),
StringValueView("e"), StringValueView("f")}));
ASSERT_OK_AND_ASSIGN(auto has,
map_value.Has(value_manager(), StringValueView("a")));
ASSERT_TRUE(InstanceOf<BoolValue>(has));
EXPECT_TRUE(Cast<BoolValue>(has).NativeValue());
ASSERT_OK_AND_ASSIGN(
has, map_value.Has(value_manager(), StringValueView(absl::Cord("a"))));
ASSERT_TRUE(InstanceOf<BoolValue>(has));
EXPECT_TRUE(Cast<BoolValue>(has).NativeValue());
ASSERT_OK_AND_ASSIGN(auto get,
map_value.Get(value_manager(), StringValueView("a")));
ASSERT_TRUE(InstanceOf<NullValue>(get));
ASSERT_OK_AND_ASSIGN(
get, map_value.Get(value_manager(), StringValueView(absl::Cord("a"))));
ASSERT_TRUE(InstanceOf<NullValue>(get));
}
TEST_P(ValueFactoryTest, ListValue) {
auto list_value1 = value_factory().CreateZeroListValue(
type_factory().CreateListType(StringTypeView()));
EXPECT_TRUE(
Is(list_value1, value_factory().CreateZeroListValue(
type_factory().CreateListType(StringTypeView()))));
EXPECT_FALSE(
Is(list_value1, value_factory().CreateZeroListValue(
type_factory().CreateListType(BoolTypeView()))));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto list_value2 = value_factory().CreateZeroListValue(
type_factory().CreateListType(struct_type1));
EXPECT_TRUE(
Is(list_value2, value_factory().CreateZeroListValue(
type_factory().CreateListType(struct_type1))));
EXPECT_FALSE(
Is(list_value2, value_factory().CreateZeroListValue(
type_factory().CreateListType(struct_type2))));
auto zero_list_value = value_factory().GetZeroDynListValue();
EXPECT_THAT(zero_list_value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(zero_list_value.Size(), IsOkAndHolds(0));
EXPECT_EQ(zero_list_value.GetType(type_manager()),
ProcessLocalTypeCache::Get()->GetDynListType());
}
TEST_P(ValueFactoryTest, MapValue) {
auto map_value1 = value_factory().CreateZeroMapValue(
type_factory().CreateMapType(StringTypeView(), IntTypeView()));
EXPECT_TRUE(Is(map_value1, value_factory().CreateZeroMapValue(
type_factory().CreateMapType(StringTypeView(),
IntTypeView()))));
EXPECT_FALSE(Is(map_value1, value_factory().CreateZeroMapValue(
type_factory().CreateMapType(
StringTypeView(), BoolTypeView()))));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto map_value2 = value_factory().CreateZeroMapValue(
type_factory().CreateMapType(StringTypeView(), struct_type1));
EXPECT_TRUE(Is(map_value2, value_factory().CreateZeroMapValue(
type_factory().CreateMapType(StringTypeView(),
struct_type1))));
EXPECT_FALSE(Is(map_value2, value_factory().CreateZeroMapValue(
type_factory().CreateMapType(StringTypeView(),
struct_type2))));
auto zero_map_value = value_factory().GetZeroDynDynMapValue();
EXPECT_THAT(zero_map_value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(zero_map_value.Size(), IsOkAndHolds(0));
EXPECT_EQ(zero_map_value.GetType(type_manager()),
ProcessLocalTypeCache::Get()->GetDynDynMapType());
zero_map_value = value_factory().GetZeroStringDynMapValue();
EXPECT_THAT(zero_map_value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(zero_map_value.Size(), IsOkAndHolds(0));
EXPECT_EQ(zero_map_value.GetType(type_manager()),
ProcessLocalTypeCache::Get()->GetStringDynMapType());
}
TEST_P(ValueFactoryTest, OptionalType) {
auto optional_value1 = value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(StringTypeView()));
EXPECT_TRUE(Is(optional_value1,
value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(StringTypeView()))));
EXPECT_FALSE(Is(optional_value1,
value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(BoolTypeView()))));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto optional_value2 = value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(struct_type1));
EXPECT_TRUE(Is(optional_value2,
value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(struct_type1))));
EXPECT_FALSE(Is(optional_value2,
value_factory().CreateZeroOptionalValue(
type_factory().CreateOptionalType(struct_type2))));
auto zero_optional_value = value_factory().GetZeroDynOptionalValue();
EXPECT_FALSE(zero_optional_value.HasValue());
EXPECT_EQ(zero_optional_value.GetType(type_manager()),
ProcessLocalTypeCache::Get()->GetDynOptionalType());
}
INSTANTIATE_TEST_SUITE_P(
ValueFactoryTest, ValueFactoryTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
::testing::Values(ThreadSafety::kCompatible,
ThreadSafety::kSafe)),
ValueFactoryTest::ToString);
}
}  // namespace cel

// [__index_level_0__: 1]

// Code:
#ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_VALUE_TESTING_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_VALUE_TESTING_H_
#include <ostream>
#include <utility>
#include "absl/status/status.h"
#include "common/value.h"
#include "extensions/protobuf/internal/message.h"
#include "extensions/protobuf/value.h"
#include "internal/testing.h"
namespace cel::extensions::test {
template <typename MessageType>
class StructValueAsProtoMatcher {
public:
using is_gtest_matcher = void;
explicit StructValueAsProtoMatcher(testing::Matcher<MessageType>&& m)
: m_(std::move(m)) {}
bool MatchAndExplain(cel::Value v,
testing::MatchResultListener* result_listener) const {
MessageType msg;
absl::Status s = ProtoMessageFromValue(v, msg);
if (!s.ok()) {
*result_listener << "cannot convert to "
<< MessageType::descriptor()->full_name() << ": " << s;
return false;
}
return m_.MatchAndExplain(msg, result_listener);
}
void DescribeTo(std::ostream* os) const {
*os << "matches proto message " << m_;
}
void DescribeNegationTo(std::ostream* os) const {
*os << "does not match proto message " << m_;
}
private:
testing::Matcher<MessageType> m_;
};
template <typename MessageType>
inline StructValueAsProtoMatcher<MessageType> StructValueAsProto(
testing::Matcher<MessageType>&& m) {
static_assert(
cel::extensions::protobuf_internal::IsProtoMessage<MessageType>);
return StructValueAsProtoMatcher<MessageType>(std::move(m));
}
}
#endif
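// Illustrative usage of the matcher above inside a test body (a sketch,
// mirroring the test further below):
//
//   EXPECT_THAT(value, StructValueAsProto<TestAllTypes>(EqualsProto(R"pb(
//     single_int32: 42
//   )pb")));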
#include "common/value_testing.h"
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "internal/testing.h"
namespace cel {
void PrintTo(const Value& value, std::ostream* os) { *os << value << "\n"; }
namespace test {
namespace {
using testing::Matcher;
template <typename Type>
constexpr ValueKind ToValueKind() {
if constexpr (std::is_same_v<Type, BoolValue>) {
return ValueKind::kBool;
} else if constexpr (std::is_same_v<Type, IntValue>) {
return ValueKind::kInt;
} else if constexpr (std::is_same_v<Type, UintValue>) {
return ValueKind::kUint;
} else if constexpr (std::is_same_v<Type, DoubleValue>) {
return ValueKind::kDouble;
} else if constexpr (std::is_same_v<Type, StringValue>) {
return ValueKind::kString;
} else if constexpr (std::is_same_v<Type, BytesValue>) {
return ValueKind::kBytes;
} else if constexpr (std::is_same_v<Type, DurationValue>) {
return ValueKind::kDuration;
} else if constexpr (std::is_same_v<Type, TimestampValue>) {
return ValueKind::kTimestamp;
} else if constexpr (std::is_same_v<Type, ErrorValue>) {
return ValueKind::kError;
} else if constexpr (std::is_same_v<Type, MapValue>) {
return ValueKind::kMap;
} else if constexpr (std::is_same_v<Type, ListValue>) {
return ValueKind::kList;
} else if constexpr (std::is_same_v<Type, StructValue>) {
return ValueKind::kStruct;
} else if constexpr (std::is_same_v<Type, OpaqueValue>) {
return ValueKind::kOpaque;
} else {
return ValueKind::kError;
}
}
template <typename Type, typename NativeType>
class SimpleTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<NativeType>;
explicit SimpleTypeMatcherImpl(MatcherType&& matcher)
: matcher_(std::forward<MatcherType>(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return InstanceOf<Type>(v) &&
matcher_.MatchAndExplain(Cast<Type>(v).NativeValue(), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
template <typename Type>
class StringTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<std::string>;
explicit StringTypeMatcherImpl(MatcherType matcher)
: matcher_((std::move(matcher))) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return InstanceOf<Type>(v) && matcher_.Matches(Cast<Type>(v).ToString());
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
template <typename Type>
class AbstractTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<Type>;
explicit AbstractTypeMatcherImpl(MatcherType&& matcher)
: matcher_(std::forward<MatcherType>(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return InstanceOf<Type>(v) && matcher_.Matches(Cast<Type>(v));
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
class OptionalValueMatcherImpl
: public testing::MatcherInterface<const Value&> {
public:
explicit OptionalValueMatcherImpl(ValueMatcher matcher)
: matcher_(std::move(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
if (!InstanceOf<OptionalValue>(v)) {
*listener << "wanted OptionalValue, got " << ValueKindToString(v.kind());
return false;
}
const auto& optional_value = Cast<OptionalValue>(v);
if (!optional_value->HasValue()) {
*listener << "OptionalValue is not engaged";
return false;
}
return matcher_.MatchAndExplain(optional_value->Value(), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "is OptionalValue that is engaged with value whose ";
matcher_.DescribeTo(os);
}
private:
ValueMatcher matcher_;
};
MATCHER(OptionalValueIsEmptyImpl, "is empty OptionalValue") {
const Value& v = arg;
if (!InstanceOf<OptionalValue>(v)) {
*result_listener << "wanted OptionalValue, got "
<< ValueKindToString(v.kind());
return false;
}
const auto& optional_value = Cast<OptionalValue>(v);
  *result_listener << (optional_value->HasValue() ? "is not empty"
                                                  : "is empty");
  return !optional_value->HasValue();
}
}
ValueMatcher BoolValueIs(Matcher<bool> m) {
return ValueMatcher(new SimpleTypeMatcherImpl<BoolValue, bool>(std::move(m)));
}
ValueMatcher IntValueIs(Matcher<int64_t> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<IntValue, int64_t>(std::move(m)));
}
ValueMatcher UintValueIs(Matcher<uint64_t> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<UintValue, uint64_t>(std::move(m)));
}
ValueMatcher DoubleValueIs(Matcher<double> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<DoubleValue, double>(std::move(m)));
}
ValueMatcher TimestampValueIs(Matcher<absl::Time> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<TimestampValue, absl::Time>(std::move(m)));
}
ValueMatcher DurationValueIs(Matcher<absl::Duration> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<DurationValue, absl::Duration>(std::move(m)));
}
ValueMatcher ErrorValueIs(Matcher<absl::Status> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<ErrorValue, absl::Status>(std::move(m)));
}
ValueMatcher StringValueIs(Matcher<std::string> m) {
return ValueMatcher(new StringTypeMatcherImpl<StringValue>(std::move(m)));
}
ValueMatcher BytesValueIs(Matcher<std::string> m) {
return ValueMatcher(new StringTypeMatcherImpl<BytesValue>(std::move(m)));
}
ValueMatcher MapValueIs(Matcher<MapValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<MapValue>(std::move(m)));
}
ValueMatcher ListValueIs(Matcher<ListValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<ListValue>(std::move(m)));
}
ValueMatcher StructValueIs(Matcher<StructValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<StructValue>(std::move(m)));
}
ValueMatcher OptionalValueIs(ValueMatcher m) {
return ValueMatcher(new OptionalValueMatcherImpl(std::move(m)));
}
ValueMatcher OptionalValueIsEmpty() { return OptionalValueIsEmptyImpl(); }
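// Illustrative usage (a sketch): the factories above compose with ordinary
// gtest/gmock matchers.
//
//   EXPECT_THAT(value, IntValueIs(42));
//   EXPECT_THAT(value, StringValueIs(testing::StartsWith("foo")));
//   EXPECT_THAT(value, OptionalValueIs(BoolValueIs(true)));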
}
}  // namespace cel

// Unit Test:
#include "extensions/protobuf/value_testing.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "extensions/protobuf/memory_manager.h"
#include "extensions/protobuf/value.h"
#include "internal/proto_matchers.h"
#include "internal/testing.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/arena.h"
namespace cel::extensions::test {
namespace {
using ::cel::extensions::ProtoMessageToValue;
using ::cel::internal::test::EqualsProto;
using ::google::api::expr::test::v1::proto2::TestAllTypes;
class ProtoValueTesting : public common_internal::ThreadCompatibleValueTest<> {
protected:
MemoryManager NewThreadCompatiblePoolingMemoryManager() override {
return cel::extensions::ProtoMemoryManager(&arena_);
}
private:
google::protobuf::Arena arena_;
};
class ProtoValueTestingTest : public ProtoValueTesting {};
TEST_P(ProtoValueTestingTest, StructValueAsProtoSimple) {
TestAllTypes test_proto;
test_proto.set_single_int32(42);
test_proto.set_single_string("foo");
ASSERT_OK_AND_ASSIGN(cel::Value v,
ProtoMessageToValue(value_manager(), test_proto));
EXPECT_THAT(v, StructValueAsProto<TestAllTypes>(EqualsProto(R"pb(
single_int32: 42
single_string: "foo"
)pb")));
}
INSTANTIATE_TEST_SUITE_P(ProtoValueTesting, ProtoValueTestingTest,
testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
ProtoValueTestingTest::ToString);
}
}  // namespace cel::extensions::test

// [__index_level_0__: 2]

// Code:
#ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_TYPE_INTROSPECTOR_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_TYPE_INTROSPECTOR_H_
#include "absl/base/nullability.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_introspector.h"
#include "google/protobuf/descriptor.h"
namespace cel::extensions {
class ProtoTypeIntrospector : public virtual TypeIntrospector {
public:
ProtoTypeIntrospector()
: ProtoTypeIntrospector(google::protobuf::DescriptorPool::generated_pool()) {}
explicit ProtoTypeIntrospector(
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool)
: descriptor_pool_(descriptor_pool) {}
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() const {
return descriptor_pool_;
}
protected:
absl::StatusOr<absl::optional<TypeView>> FindTypeImpl(
TypeFactory& type_factory, absl::string_view name,
Type& scratch) const final;
absl::StatusOr<absl::optional<StructTypeFieldView>>
FindStructTypeFieldByNameImpl(TypeFactory& type_factory,
absl::string_view type, absl::string_view name,
StructTypeField& scratch) const final;
private:
absl::Nonnull<const google::protobuf::DescriptorPool*> const descriptor_pool_;
};
}
#endif
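// Illustrative usage (a sketch, mirroring the test below): lookups normally
// go through a TypeManager wired to this introspector.
//
//   ProtoTypeIntrospector introspector;  // generated descriptor pool
//   // type_manager.FindType(TestAllTypes::descriptor()->full_name())
//   //   resolves to a StructType with that fully qualified name.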
#include "extensions/protobuf/type_introspector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_introspector.h"
#include "extensions/protobuf/type.h"
#include "internal/status_macros.h"
namespace cel::extensions {
absl::StatusOr<absl::optional<TypeView>> ProtoTypeIntrospector::FindTypeImpl(
TypeFactory& type_factory, absl::string_view name, Type& scratch) const {
const auto* desc = descriptor_pool()->FindMessageTypeByName(name);
if (desc == nullptr) {
return absl::nullopt;
}
scratch = type_factory.CreateStructType(desc->full_name());
return scratch;
}
absl::StatusOr<absl::optional<StructTypeFieldView>>
ProtoTypeIntrospector::FindStructTypeFieldByNameImpl(
TypeFactory& type_factory, absl::string_view type, absl::string_view name,
StructTypeField& scratch) const {
const auto* desc = descriptor_pool()->FindMessageTypeByName(type);
if (desc == nullptr) {
return absl::nullopt;
}
const auto* field_desc = desc->FindFieldByName(name);
if (field_desc == nullptr) {
field_desc = descriptor_pool()->FindExtensionByPrintableName(desc, name);
if (field_desc == nullptr) {
return absl::nullopt;
}
}
StructTypeFieldView result;
CEL_ASSIGN_OR_RETURN(
result.type,
ProtoFieldTypeToType(type_factory, field_desc, scratch.type));
result.name = field_desc->name();
result.number = field_desc->number();
return result;
}
}  // namespace cel::extensions

// Unit Test:
#include "extensions/protobuf/type_introspector.h"
#include "absl/types/optional.h"
#include "common/type.h"
#include "common/type_testing.h"
#include "internal/testing.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/descriptor.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::test::v1::proto2::TestAllTypes;
using testing::Eq;
using testing::Optional;
using cel::internal::IsOkAndHolds;
class ProtoTypeIntrospectorTest
: public common_internal::ThreadCompatibleTypeTest<> {
private:
Shared<TypeIntrospector> NewTypeIntrospector(
MemoryManagerRef memory_manager) override {
return memory_manager.MakeShared<ProtoTypeIntrospector>();
}
};
TEST_P(ProtoTypeIntrospectorTest, FindType) {
EXPECT_THAT(
type_manager().FindType(TestAllTypes::descriptor()->full_name()),
IsOkAndHolds(Optional(Eq(StructType(
memory_manager(), TestAllTypes::GetDescriptor()->full_name())))));
EXPECT_THAT(type_manager().FindType("type.that.does.not.Exist"),
IsOkAndHolds(Eq(absl::nullopt)));
}
TEST_P(ProtoTypeIntrospectorTest, FindStructTypeFieldByName) {
ASSERT_OK_AND_ASSIGN(
auto field, type_manager().FindStructTypeFieldByName(
TestAllTypes::descriptor()->full_name(), "single_int32"));
ASSERT_TRUE(field.has_value());
EXPECT_THAT(field->name, Eq("single_int32"));
EXPECT_THAT(field->number, Eq(1));
EXPECT_THAT(field->type, Eq(IntType{}));
EXPECT_THAT(
type_manager().FindStructTypeFieldByName(
TestAllTypes::descriptor()->full_name(), "field_that_does_not_exist"),
IsOkAndHolds(Eq(absl::nullopt)));
EXPECT_THAT(type_manager().FindStructTypeFieldByName(
"type.that.does.not.Exist", "does_not_matter"),
IsOkAndHolds(Eq(absl::nullopt)));
}
INSTANTIATE_TEST_SUITE_P(
ProtoTypeIntrospectorTest, ProtoTypeIntrospectorTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
ProtoTypeIntrospectorTest::ToString);
}
}  // namespace cel::extensions

// [__index_level_0__: 3]

// Code:
#ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_INTERNAL_CONSTANT_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_INTERNAL_CONSTANT_H_
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "common/constant.h"
namespace cel::extensions::protobuf_internal {
absl::Status ConstantToProto(const Constant& constant,
absl::Nonnull<google::api::expr::v1alpha1::Constant*> proto);
absl::Status ConstantFromProto(const google::api::expr::v1alpha1::Constant& proto,
Constant& constant);
}
#endif
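// Illustrative round-trip (a sketch) between cel::Constant and the
// google.api.expr.v1alpha1.Constant proto, using the declarations above:
//
//   cel::Constant constant;
//   constant.set_int_value(42);
//   google::api::expr::v1alpha1::Constant proto;
//   CEL_RETURN_IF_ERROR(ConstantToProto(constant, &proto));
//   cel::Constant decoded;
//   CEL_RETURN_IF_ERROR(ConstantFromProto(proto, decoded));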
#include "extensions/protobuf/internal/constant.h"
#include <cstddef>
#include <cstdint>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "absl/types/variant.h"
#include "common/constant.h"
#include "internal/proto_time_encoding.h"
namespace cel::extensions::protobuf_internal {
using ConstantProto = google::api::expr::v1alpha1::Constant;
absl::Status ConstantToProto(const Constant& constant,
absl::Nonnull<ConstantProto*> proto) {
return absl::visit(absl::Overload(
[proto](absl::monostate) -> absl::Status {
proto->clear_constant_kind();
return absl::OkStatus();
},
[proto](std::nullptr_t) -> absl::Status {
proto->set_null_value(google::protobuf::NULL_VALUE);
return absl::OkStatus();
},
[proto](bool value) -> absl::Status {
proto->set_bool_value(value);
return absl::OkStatus();
},
[proto](int64_t value) -> absl::Status {
proto->set_int64_value(value);
return absl::OkStatus();
},
[proto](uint64_t value) -> absl::Status {
proto->set_uint64_value(value);
return absl::OkStatus();
},
[proto](double value) -> absl::Status {
proto->set_double_value(value);
return absl::OkStatus();
},
[proto](const BytesConstant& value) -> absl::Status {
proto->set_bytes_value(value);
return absl::OkStatus();
},
[proto](const StringConstant& value) -> absl::Status {
proto->set_string_value(value);
return absl::OkStatus();
},
[proto](absl::Duration value) -> absl::Status {
return internal::EncodeDuration(
value, proto->mutable_duration_value());
},
[proto](absl::Time value) -> absl::Status {
return internal::EncodeTime(
value, proto->mutable_timestamp_value());
}),
constant.kind());
}
absl::Status ConstantFromProto(const ConstantProto& proto, Constant& constant) {
switch (proto.constant_kind_case()) {
case ConstantProto::CONSTANT_KIND_NOT_SET:
constant = Constant{};
break;
case ConstantProto::kNullValue:
constant.set_null_value();
break;
case ConstantProto::kBoolValue:
constant.set_bool_value(proto.bool_value());
break;
case ConstantProto::kInt64Value:
constant.set_int_value(proto.int64_value());
break;
case ConstantProto::kUint64Value:
constant.set_uint_value(proto.uint64_value());
break;
case ConstantProto::kDoubleValue:
constant.set_double_value(proto.double_value());
break;
case ConstantProto::kStringValue:
constant.set_string_value(proto.string_value());
break;
case ConstantProto::kBytesValue:
constant.set_bytes_value(proto.bytes_value());
break;
case ConstantProto::kDurationValue:
constant.set_duration_value(
internal::DecodeDuration(proto.duration_value()));
break;
case ConstantProto::kTimestampValue:
constant.set_timestamp_value(
internal::DecodeTime(proto.timestamp_value()));
break;
default:
return absl::InvalidArgumentError(
absl::StrCat("unexpected ConstantKindCase: ",
static_cast<int>(proto.constant_kind_case())));
}
return absl::OkStatus();
}
}  // namespace cel::extensions::protobuf_internal

// Unit Test:
#include "common/constant.h"
#include <cmath>
#include <string>
#include "absl/strings/has_absl_stringify.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::IsEmpty;
using testing::IsFalse;
using testing::IsTrue;
TEST(Constant, NullValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_null_value(), IsFalse());
const_expr.set_null_value();
EXPECT_THAT(const_expr.has_null_value(), IsTrue());
}
TEST(Constant, BoolValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_bool_value(), IsFalse());
EXPECT_EQ(const_expr.bool_value(), false);
const_expr.set_bool_value(false);
EXPECT_THAT(const_expr.has_bool_value(), IsTrue());
EXPECT_EQ(const_expr.bool_value(), false);
}
TEST(Constant, IntValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_int_value(), IsFalse());
EXPECT_EQ(const_expr.int_value(), 0);
const_expr.set_int_value(0);
EXPECT_THAT(const_expr.has_int_value(), IsTrue());
EXPECT_EQ(const_expr.int_value(), 0);
}
TEST(Constant, UintValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_uint_value(), IsFalse());
EXPECT_EQ(const_expr.uint_value(), 0);
const_expr.set_uint_value(0);
EXPECT_THAT(const_expr.has_uint_value(), IsTrue());
EXPECT_EQ(const_expr.uint_value(), 0);
}
TEST(Constant, DoubleValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_double_value(), IsFalse());
EXPECT_EQ(const_expr.double_value(), 0);
const_expr.set_double_value(0);
EXPECT_THAT(const_expr.has_double_value(), IsTrue());
EXPECT_EQ(const_expr.double_value(), 0);
}
TEST(Constant, BytesValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_bytes_value(), IsFalse());
EXPECT_THAT(const_expr.bytes_value(), IsEmpty());
const_expr.set_bytes_value("foo");
EXPECT_THAT(const_expr.has_bytes_value(), IsTrue());
EXPECT_EQ(const_expr.bytes_value(), "foo");
}
TEST(Constant, StringValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_string_value(), IsFalse());
EXPECT_THAT(const_expr.string_value(), IsEmpty());
const_expr.set_string_value("foo");
EXPECT_THAT(const_expr.has_string_value(), IsTrue());
EXPECT_EQ(const_expr.string_value(), "foo");
}
TEST(Constant, DurationValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_duration_value(), IsFalse());
EXPECT_EQ(const_expr.duration_value(), absl::ZeroDuration());
const_expr.set_duration_value(absl::ZeroDuration());
EXPECT_THAT(const_expr.has_duration_value(), IsTrue());
EXPECT_EQ(const_expr.duration_value(), absl::ZeroDuration());
}
TEST(Constant, TimestampValue) {
Constant const_expr;
EXPECT_THAT(const_expr.has_timestamp_value(), IsFalse());
EXPECT_EQ(const_expr.timestamp_value(), absl::UnixEpoch());
const_expr.set_timestamp_value(absl::UnixEpoch());
EXPECT_THAT(const_expr.has_timestamp_value(), IsTrue());
EXPECT_EQ(const_expr.timestamp_value(), absl::UnixEpoch());
}
TEST(Constant, Equality) {
EXPECT_EQ(Constant{}, Constant{});
Constant lhs_const_expr;
Constant rhs_const_expr;
lhs_const_expr.set_null_value();
rhs_const_expr.set_null_value();
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_bool_value(false);
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_bool_value(false);
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_int_value(0);
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_int_value(0);
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_uint_value(0);
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_uint_value(0);
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_double_value(0);
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_double_value(0);
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_bytes_value("foo");
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_bytes_value("foo");
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_string_value("foo");
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_string_value("foo");
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_duration_value(absl::ZeroDuration());
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_duration_value(absl::ZeroDuration());
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
lhs_const_expr.set_timestamp_value(absl::UnixEpoch());
rhs_const_expr.set_null_value();
EXPECT_NE(lhs_const_expr, rhs_const_expr);
EXPECT_NE(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
rhs_const_expr.set_timestamp_value(absl::UnixEpoch());
EXPECT_EQ(lhs_const_expr, rhs_const_expr);
EXPECT_EQ(rhs_const_expr, lhs_const_expr);
EXPECT_NE(lhs_const_expr, Constant{});
EXPECT_NE(Constant{}, rhs_const_expr);
}
std::string Stringify(const Constant& constant) {
return absl::StrFormat("%v", constant);
}
TEST(Constant, HasAbslStringify) {
EXPECT_TRUE(absl::HasAbslStringify<Constant>::value);
}
TEST(Constant, AbslStringify) {
Constant constant;
EXPECT_EQ(Stringify(constant), "<unspecified>");
constant.set_null_value();
EXPECT_EQ(Stringify(constant), "null");
constant.set_bool_value(true);
EXPECT_EQ(Stringify(constant), "true");
constant.set_int_value(1);
EXPECT_EQ(Stringify(constant), "1");
constant.set_uint_value(1);
EXPECT_EQ(Stringify(constant), "1u");
constant.set_double_value(1);
EXPECT_EQ(Stringify(constant), "1.0");
constant.set_double_value(1.1);
EXPECT_EQ(Stringify(constant), "1.1");
constant.set_double_value(NAN);
EXPECT_EQ(Stringify(constant), "nan");
constant.set_double_value(INFINITY);
EXPECT_EQ(Stringify(constant), "+infinity");
constant.set_double_value(-INFINITY);
EXPECT_EQ(Stringify(constant), "-infinity");
constant.set_bytes_value("foo");
EXPECT_EQ(Stringify(constant), "b\"foo\"");
constant.set_string_value("foo");
EXPECT_EQ(Stringify(constant), "\"foo\"");
constant.set_duration_value(absl::Seconds(1));
EXPECT_EQ(Stringify(constant), "duration(\"1s\")");
constant.set_timestamp_value(absl::UnixEpoch() + absl::Seconds(1));
EXPECT_EQ(Stringify(constant), "timestamp(\"1970-01-01T00:00:01Z\")");
}
}
} | 4 |
#ifndef THIRD_PARTY_CEL_CPP_BASE_AST_INTERNAL_EXPR_H_
#define THIRD_PARTY_CEL_CPP_BASE_AST_INTERNAL_EXPR_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/ast.h"
#include "common/constant.h"
#include "common/expr.h"
namespace cel::ast_internal {
using NullValue = std::nullptr_t;
using Bytes = cel::BytesConstant;
using Constant = cel::Constant;
using ConstantKind = cel::ConstantKind;
using Ident = cel::IdentExpr;
using Expr = cel::Expr;
using ExprKind = cel::ExprKind;
using Select = cel::SelectExpr;
using Call = cel::CallExpr;
using CreateList = cel::ListExpr;
using CreateStruct = cel::StructExpr;
using Comprehension = cel::ComprehensionExpr;
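// Records a feature applied while producing the AST (e.g. "constant_folding"),
// the components it affects, and an optional version; an unset version reads
// back as the 0.0 default instance.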
class Extension {
public:
class Version {
public:
Version() : major_(0), minor_(0) {}
Version(int64_t major, int64_t minor) : major_(major), minor_(minor) {}
Version(const Version& other) = default;
Version(Version&& other) = default;
Version& operator=(const Version& other) = default;
Version& operator=(Version&& other) = default;
static const Version& DefaultInstance();
int64_t major() const { return major_; }
void set_major(int64_t val) { major_ = val; }
int64_t minor() const { return minor_; }
void set_minor(int64_t val) { minor_ = val; }
bool operator==(const Version& other) const {
return major_ == other.major_ && minor_ == other.minor_;
}
bool operator!=(const Version& other) const { return !operator==(other); }
private:
int64_t major_;
int64_t minor_;
};
enum class Component {
kUnspecified,
kParser,
kTypeChecker,
kRuntime
};
static const Extension& DefaultInstance();
Extension() = default;
Extension(std::string id, std::unique_ptr<Version> version,
std::vector<Component> affected_components)
: id_(std::move(id)),
affected_components_(std::move(affected_components)),
version_(std::move(version)) {}
Extension(const Extension& other);
Extension(Extension&& other) = default;
Extension& operator=(const Extension& other);
Extension& operator=(Extension&& other) = default;
const std::string& id() const { return id_; }
void set_id(std::string id) { id_ = std::move(id); }
const std::vector<Component>& affected_components() const {
return affected_components_;
}
std::vector<Component>& mutable_affected_components() {
return affected_components_;
}
const Version& version() const {
if (version_ == nullptr) {
return Version::DefaultInstance();
}
return *version_;
}
Version& mutable_version() {
if (version_ == nullptr) {
version_ = std::make_unique<Version>();
}
return *version_;
}
void set_version(std::unique_ptr<Version> version) {
version_ = std::move(version);
}
bool operator==(const Extension& other) const {
return id_ == other.id_ &&
affected_components_ == other.affected_components_ &&
version() == other.version();
}
bool operator!=(const Extension& other) const { return !operator==(other); }
private:
std::string id_;
std::vector<Component> affected_components_;
std::unique_ptr<Version> version_;
};
class SourceInfo {
public:
SourceInfo() = default;
SourceInfo(std::string syntax_version, std::string location,
std::vector<int32_t> line_offsets,
absl::flat_hash_map<int64_t, int32_t> positions,
absl::flat_hash_map<int64_t, Expr> macro_calls,
std::vector<Extension> extensions)
: syntax_version_(std::move(syntax_version)),
location_(std::move(location)),
line_offsets_(std::move(line_offsets)),
positions_(std::move(positions)),
macro_calls_(std::move(macro_calls)),
extensions_(std::move(extensions)) {}
void set_syntax_version(std::string syntax_version) {
syntax_version_ = std::move(syntax_version);
}
void set_location(std::string location) { location_ = std::move(location); }
void set_line_offsets(std::vector<int32_t> line_offsets) {
line_offsets_ = std::move(line_offsets);
}
void set_positions(absl::flat_hash_map<int64_t, int32_t> positions) {
positions_ = std::move(positions);
}
void set_macro_calls(absl::flat_hash_map<int64_t, Expr> macro_calls) {
macro_calls_ = std::move(macro_calls);
}
const std::string& syntax_version() const { return syntax_version_; }
const std::string& location() const { return location_; }
const std::vector<int32_t>& line_offsets() const { return line_offsets_; }
std::vector<int32_t>& mutable_line_offsets() { return line_offsets_; }
const absl::flat_hash_map<int64_t, int32_t>& positions() const {
return positions_;
}
absl::flat_hash_map<int64_t, int32_t>& mutable_positions() {
return positions_;
}
const absl::flat_hash_map<int64_t, Expr>& macro_calls() const {
return macro_calls_;
}
absl::flat_hash_map<int64_t, Expr>& mutable_macro_calls() {
return macro_calls_;
}
bool operator==(const SourceInfo& other) const {
return syntax_version_ == other.syntax_version_ &&
location_ == other.location_ &&
line_offsets_ == other.line_offsets_ &&
positions_ == other.positions_ &&
macro_calls_ == other.macro_calls_ &&
extensions_ == other.extensions_;
}
bool operator!=(const SourceInfo& other) const { return !operator==(other); }
const std::vector<Extension>& extensions() const { return extensions_; }
std::vector<Extension>& mutable_extensions() { return extensions_; }
private:
std::string syntax_version_;
std::string location_;
std::vector<int32_t> line_offsets_;
absl::flat_hash_map<int64_t, int32_t> positions_;
absl::flat_hash_map<int64_t, Expr> macro_calls_;
std::vector<Extension> extensions_;
};
class ParsedExpr {
public:
ParsedExpr() = default;
ParsedExpr(Expr expr, SourceInfo source_info)
: expr_(std::move(expr)), source_info_(std::move(source_info)) {}
ParsedExpr(ParsedExpr&& rhs) = default;
ParsedExpr& operator=(ParsedExpr&& rhs) = default;
void set_expr(Expr expr) { expr_ = std::move(expr); }
void set_source_info(SourceInfo source_info) {
source_info_ = std::move(source_info);
}
const Expr& expr() const { return expr_; }
Expr& mutable_expr() { return expr_; }
const SourceInfo& source_info() const { return source_info_; }
SourceInfo& mutable_source_info() { return source_info_; }
private:
Expr expr_;
SourceInfo source_info_;
};
enum class PrimitiveType {
kPrimitiveTypeUnspecified = 0,
kBool = 1,
kInt64 = 2,
kUint64 = 3,
kDouble = 4,
kString = 5,
kBytes = 6,
};
enum class WellKnownType {
kWellKnownTypeUnspecified = 0,
kAny = 1,
kTimestamp = 2,
kDuration = 3,
};
class Type;
class ListType {
public:
ListType() = default;
ListType(const ListType& rhs)
: elem_type_(std::make_unique<Type>(rhs.elem_type())) {}
ListType& operator=(const ListType& rhs) {
elem_type_ = std::make_unique<Type>(rhs.elem_type());
return *this;
}
ListType(ListType&& rhs) = default;
ListType& operator=(ListType&& rhs) = default;
explicit ListType(std::unique_ptr<Type> elem_type)
: elem_type_(std::move(elem_type)) {}
void set_elem_type(std::unique_ptr<Type> elem_type) {
elem_type_ = std::move(elem_type);
}
bool has_elem_type() const { return elem_type_ != nullptr; }
const Type& elem_type() const;
Type& mutable_elem_type() {
if (elem_type_ == nullptr) {
elem_type_ = std::make_unique<Type>();
}
return *elem_type_;
}
bool operator==(const ListType& other) const;
private:
std::unique_ptr<Type> elem_type_;
};
class MapType {
public:
MapType() = default;
MapType(std::unique_ptr<Type> key_type, std::unique_ptr<Type> value_type)
: key_type_(std::move(key_type)), value_type_(std::move(value_type)) {}
MapType(const MapType& rhs)
: key_type_(std::make_unique<Type>(rhs.key_type())),
value_type_(std::make_unique<Type>(rhs.value_type())) {}
MapType& operator=(const MapType& rhs) {
key_type_ = std::make_unique<Type>(rhs.key_type());
value_type_ = std::make_unique<Type>(rhs.value_type());
return *this;
}
MapType(MapType&& rhs) = default;
MapType& operator=(MapType&& rhs) = default;
void set_key_type(std::unique_ptr<Type> key_type) {
key_type_ = std::move(key_type);
}
void set_value_type(std::unique_ptr<Type> value_type) {
value_type_ = std::move(value_type);
}
bool has_key_type() const { return key_type_ != nullptr; }
bool has_value_type() const { return value_type_ != nullptr; }
const Type& key_type() const;
const Type& value_type() const;
bool operator==(const MapType& other) const;
Type& mutable_key_type() {
if (key_type_ == nullptr) {
key_type_ = std::make_unique<Type>();
}
return *key_type_;
}
Type& mutable_value_type() {
if (value_type_ == nullptr) {
value_type_ = std::make_unique<Type>();
}
return *value_type_;
}
private:
std::unique_ptr<Type> key_type_;
std::unique_ptr<Type> value_type_;
};
class FunctionType {
public:
FunctionType() = default;
FunctionType(std::unique_ptr<Type> result_type, std::vector<Type> arg_types);
FunctionType(const FunctionType& other);
FunctionType& operator=(const FunctionType& other);
FunctionType(FunctionType&&) = default;
FunctionType& operator=(FunctionType&&) = default;
void set_result_type(std::unique_ptr<Type> result_type) {
result_type_ = std::move(result_type);
}
void set_arg_types(std::vector<Type> arg_types);
bool has_result_type() const { return result_type_ != nullptr; }
const Type& result_type() const;
Type& mutable_result_type() {
if (result_type_ == nullptr) {
result_type_ = std::make_unique<Type>();
}
return *result_type_;
}
const std::vector<Type>& arg_types() const { return arg_types_; }
std::vector<Type>& mutable_arg_types() { return arg_types_; }
bool operator==(const FunctionType& other) const;
private:
std::unique_ptr<Type> result_type_;
std::vector<Type> arg_types_;
};
class AbstractType {
public:
AbstractType() = default;
AbstractType(std::string name, std::vector<Type> parameter_types);
void set_name(std::string name) { name_ = std::move(name); }
void set_parameter_types(std::vector<Type> parameter_types);
const std::string& name() const { return name_; }
const std::vector<Type>& parameter_types() const { return parameter_types_; }
std::vector<Type>& mutable_parameter_types() { return parameter_types_; }
bool operator==(const AbstractType& other) const;
private:
std::string name_;
std::vector<Type> parameter_types_;
};
class PrimitiveTypeWrapper {
public:
explicit PrimitiveTypeWrapper(PrimitiveType type) : type_(std::move(type)) {}
void set_type(PrimitiveType type) { type_ = std::move(type); }
const PrimitiveType& type() const { return type_; }
PrimitiveType& mutable_type() { return type_; }
bool operator==(const PrimitiveTypeWrapper& other) const {
return type_ == other.type_;
}
private:
PrimitiveType type_;
};
class MessageType {
public:
MessageType() = default;
explicit MessageType(std::string type) : type_(std::move(type)) {}
void set_type(std::string type) { type_ = std::move(type); }
const std::string& type() const { return type_; }
bool operator==(const MessageType& other) const {
return type_ == other.type_;
}
private:
std::string type_;
};
class ParamType {
public:
ParamType() = default;
explicit ParamType(std::string type) : type_(std::move(type)) {}
void set_type(std::string type) { type_ = std::move(type); }
const std::string& type() const { return type_; }
bool operator==(const ParamType& other) const { return type_ == other.type_; }
private:
std::string type_;
};
enum class ErrorType { kErrorTypeValue = 0 };
using DynamicType = absl::monostate;
using TypeKind =
absl::variant<DynamicType, NullValue, PrimitiveType, PrimitiveTypeWrapper,
WellKnownType, ListType, MapType, FunctionType, MessageType,
ParamType, std::unique_ptr<Type>, ErrorType, AbstractType>;
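// Type wraps exactly one TypeKind alternative; the std::unique_ptr<Type>
// alternative represents a nested type-of-type. A minimal usage sketch,
// mirroring the tests below:
//
//   Type t(PrimitiveType::kBool);
//   bool is_primitive = t.has_primitive();  // true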
class Type {
public:
Type() = default;
explicit Type(TypeKind type_kind) : type_kind_(std::move(type_kind)) {}
Type(const Type& other);
Type& operator=(const Type& other);
Type(Type&&) = default;
Type& operator=(Type&&) = default;
void set_type_kind(TypeKind type_kind) { type_kind_ = std::move(type_kind); }
const TypeKind& type_kind() const { return type_kind_; }
TypeKind& mutable_type_kind() { return type_kind_; }
bool has_dyn() const {
return absl::holds_alternative<DynamicType>(type_kind_);
}
bool has_null() const {
return absl::holds_alternative<NullValue>(type_kind_);
}
bool has_primitive() const {
return absl::holds_alternative<PrimitiveType>(type_kind_);
}
bool has_wrapper() const {
return absl::holds_alternative<PrimitiveTypeWrapper>(type_kind_);
}
bool has_well_known() const {
return absl::holds_alternative<WellKnownType>(type_kind_);
}
bool has_list_type() const {
return absl::holds_alternative<ListType>(type_kind_);
}
bool has_map_type() const {
return absl::holds_alternative<MapType>(type_kind_);
}
bool has_function() const {
return absl::holds_alternative<FunctionType>(type_kind_);
}
bool has_message_type() const {
return absl::holds_alternative<MessageType>(type_kind_);
}
bool has_type_param() const {
return absl::holds_alternative<ParamType>(type_kind_);
}
bool has_type() const {
return absl::holds_alternative<std::unique_ptr<Type>>(type_kind_);
}
bool has_error() const {
return absl::holds_alternative<ErrorType>(type_kind_);
}
bool has_abstract_type() const {
return absl::holds_alternative<AbstractType>(type_kind_);
}
NullValue null() const {
auto* value = absl::get_if<NullValue>(&type_kind_);
if (value != nullptr) {
return *value;
}
return nullptr;
}
PrimitiveType primitive() const {
auto* value = absl::get_if<PrimitiveType>(&type_kind_);
if (value != nullptr) {
return *value;
}
return PrimitiveType::kPrimitiveTypeUnspecified;
}
PrimitiveType wrapper() const {
auto* value = absl::get_if<PrimitiveTypeWrapper>(&type_kind_);
if (value != nullptr) {
return value->type();
}
return PrimitiveType::kPrimitiveTypeUnspecified;
}
WellKnownType well_known() const {
auto* value = absl::get_if<WellKnownType>(&type_kind_);
if (value != nullptr) {
return *value;
}
return WellKnownType::kWellKnownTypeUnspecified;
}
const ListType& list_type() const {
auto* value = absl::get_if<ListType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const ListType* default_list_type = new ListType();
return *default_list_type;
}
const MapType& map_type() const {
auto* value = absl::get_if<MapType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const MapType* default_map_type = new MapType();
return *default_map_type;
}
const FunctionType& function() const {
auto* value = absl::get_if<FunctionType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const FunctionType* default_function_type = new FunctionType();
return *default_function_type;
}
const MessageType& message_type() const {
auto* value = absl::get_if<MessageType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const MessageType* default_message_type = new MessageType();
return *default_message_type;
}
const ParamType& type_param() const {
auto* value = absl::get_if<ParamType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const ParamType* default_param_type = new ParamType();
return *default_param_type;
}
const Type& type() const;
ErrorType error_type() const {
auto* value = absl::get_if<ErrorType>(&type_kind_);
if (value != nullptr) {
return *value;
}
return ErrorType::kErrorTypeValue;
}
const AbstractType& abstract_type() const {
auto* value = absl::get_if<AbstractType>(&type_kind_);
if (value != nullptr) {
return *value;
}
static const AbstractType* default_abstract_type = new AbstractType();
return *default_abstract_type;
}
bool operator==(const Type& other) const {
return type_kind_ == other.type_kind_;
}
private:
TypeKind type_kind_;
};
class Reference {
public:
Reference() = default;
Reference(std::string name, std::vector<std::string> overload_id,
Constant value)
: name_(std::move(name)),
overload_id_(std::move(overload_id)),
value_(std::move(value)) {}
void set_name(std::string name) { name_ = std::move(name); }
void set_overload_id(std::vector<std::string> overload_id) {
overload_id_ = std::move(overload_id);
}
void set_value(Constant value) { value_ = std::move(value); }
const std::string& name() const { return name_; }
const std::vector<std::string>& overload_id() const { return overload_id_; }
const Constant& value() const {
if (value_.has_value()) {
return value_.value();
}
static const Constant* default_constant = new Constant;
return *default_constant;
}
std::vector<std::string>& mutable_overload_id() { return overload_id_; }
Constant& mutable_value() {
if (!value_.has_value()) {
value_.emplace();
}
return *value_;
}
bool has_value() const { return value_.has_value(); }
bool operator==(const Reference& other) const {
return name_ == other.name_ && overload_id_ == other.overload_id_ &&
value() == other.value();
}
private:
std::string name_;
std::vector<std::string> overload_id_;
absl::optional<Constant> value_;
};
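// The result of type checking: the expression plus reference and type maps
// keyed by expression id, along with source info and an expression version.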
class CheckedExpr {
public:
CheckedExpr() = default;
CheckedExpr(absl::flat_hash_map<int64_t, Reference> reference_map,
absl::flat_hash_map<int64_t, Type> type_map,
SourceInfo source_info, std::string expr_version, Expr expr)
: reference_map_(std::move(reference_map)),
type_map_(std::move(type_map)),
source_info_(std::move(source_info)),
expr_version_(std::move(expr_version)),
expr_(std::move(expr)) {}
CheckedExpr(CheckedExpr&& rhs) = default;
CheckedExpr& operator=(CheckedExpr&& rhs) = default;
void set_reference_map(
absl::flat_hash_map<int64_t, Reference> reference_map) {
reference_map_ = std::move(reference_map);
}
void set_type_map(absl::flat_hash_map<int64_t, Type> type_map) {
type_map_ = std::move(type_map);
}
void set_source_info(SourceInfo source_info) {
source_info_ = std::move(source_info);
}
void set_expr_version(std::string expr_version) {
expr_version_ = std::move(expr_version);
}
void set_expr(Expr expr) { expr_ = std::move(expr); }
const absl::flat_hash_map<int64_t, Reference>& reference_map() const {
return reference_map_;
}
absl::flat_hash_map<int64_t, Reference>& mutable_reference_map() {
return reference_map_;
}
const absl::flat_hash_map<int64_t, Type>& type_map() const {
return type_map_;
}
absl::flat_hash_map<int64_t, Type>& mutable_type_map() { return type_map_; }
const SourceInfo& source_info() const { return source_info_; }
SourceInfo& mutable_source_info() { return source_info_; }
const std::string& expr_version() const { return expr_version_; }
std::string& mutable_expr_version() { return expr_version_; }
const Expr& expr() const { return expr_; }
Expr& mutable_expr() { return expr_; }
private:
absl::flat_hash_map<int64_t, Reference> reference_map_;
absl::flat_hash_map<int64_t, Type> type_map_;
SourceInfo source_info_;
std::string expr_version_;
Expr expr_;
};
inline FunctionType::FunctionType(std::unique_ptr<Type> result_type,
std::vector<Type> arg_types)
: result_type_(std::move(result_type)), arg_types_(std::move(arg_types)) {}
inline void FunctionType::set_arg_types(std::vector<Type> arg_types) {
arg_types_ = std::move(arg_types);
}
inline AbstractType::AbstractType(std::string name,
std::vector<Type> parameter_types)
: name_(std::move(name)), parameter_types_(std::move(parameter_types)) {}
inline void AbstractType::set_parameter_types(
std::vector<Type> parameter_types) {
parameter_types_ = std::move(parameter_types);
}
inline bool AbstractType::operator==(const AbstractType& other) const {
return name_ == other.name_ && parameter_types_ == other.parameter_types_;
}
}
#endif
#include "base/ast_internal/expr.h"
#include <memory>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/functional/overload.h"
#include "absl/types/variant.h"
namespace cel::ast_internal {
namespace {
const Type& default_type() {
static absl::NoDestructor<Type> type;
return *type;
}
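// Deep-copies a TypeKind. Only the std::unique_ptr<Type> alternative needs
// explicit handling; every other alternative is copyable by value.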
TypeKind CopyImpl(const TypeKind& other) {
return absl::visit(absl::Overload(
[](const std::unique_ptr<Type>& other) -> TypeKind {
return std::make_unique<Type>(*other);
},
[](const auto& other) -> TypeKind {
return other;
}),
other);
}
}
const Extension::Version& Extension::Version::DefaultInstance() {
static absl::NoDestructor<Version> instance;
return *instance;
}
const Extension& Extension::DefaultInstance() {
static absl::NoDestructor<Extension> instance;
return *instance;
}
// version_ may be unset (see Extension("constant_folding", nullptr, {}) in the
// tests), so copy it only when present.
Extension::Extension(const Extension& other)
    : id_(other.id_),
      affected_components_(other.affected_components_),
      version_(other.version_ != nullptr
                   ? std::make_unique<Version>(*other.version_)
                   : nullptr) {}
Extension& Extension::operator=(const Extension& other) {
  id_ = other.id_;
  affected_components_ = other.affected_components_;
  version_ = other.version_ != nullptr
                 ? std::make_unique<Version>(*other.version_)
                 : nullptr;
  return *this;
}
const Type& ListType::elem_type() const {
if (elem_type_ != nullptr) {
return *elem_type_;
}
return default_type();
}
bool ListType::operator==(const ListType& other) const {
return elem_type() == other.elem_type();
}
const Type& MapType::key_type() const {
if (key_type_ != nullptr) {
return *key_type_;
}
return default_type();
}
const Type& MapType::value_type() const {
if (value_type_ != nullptr) {
return *value_type_;
}
return default_type();
}
bool MapType::operator==(const MapType& other) const {
return key_type() == other.key_type() && value_type() == other.value_type();
}
const Type& FunctionType::result_type() const {
if (result_type_ != nullptr) {
return *result_type_;
}
return default_type();
}
bool FunctionType::operator==(const FunctionType& other) const {
return result_type() == other.result_type() && arg_types_ == other.arg_types_;
}
const Type& Type::type() const {
  auto* value = absl::get_if<std::unique_ptr<Type>>(&type_kind_);
  if (value != nullptr) {
    return **value;
  }
  return default_type();
}
Type::Type(const Type& other) : type_kind_(CopyImpl(other.type_kind_)) {}
Type& Type::operator=(const Type& other) {
  type_kind_ = CopyImpl(other.type_kind_);
  return *this;
}
FunctionType::FunctionType(const FunctionType& other)
    : result_type_(std::make_unique<Type>(other.result_type())),
      arg_types_(other.arg_types()) {}
FunctionType& FunctionType::operator=(const FunctionType& other) {
  result_type_ = std::make_unique<Type>(other.result_type());
  arg_types_ = other.arg_types();
  return *this;
}
} | #include "base/ast_internal/expr.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/types/variant.h"
#include "common/ast.h"
#include "internal/testing.h"
namespace cel {
namespace ast_internal {
namespace {
TEST(AstTest, ParsedExpr) {
ParsedExpr parsed_expr;
auto& expr = parsed_expr.mutable_expr();
expr.set_id(1);
expr.mutable_ident_expr().set_name("name");
auto& source_info = parsed_expr.mutable_source_info();
source_info.set_syntax_version("syntax_version");
source_info.set_location("location");
source_info.set_line_offsets({1, 2, 3});
source_info.set_positions({{1, 1}, {2, 2}});
ASSERT_TRUE(absl::holds_alternative<Ident>(parsed_expr.expr().kind()));
ASSERT_EQ(absl::get<Ident>(parsed_expr.expr().kind()).name(), "name");
ASSERT_EQ(parsed_expr.source_info().syntax_version(), "syntax_version");
ASSERT_EQ(parsed_expr.source_info().location(), "location");
EXPECT_THAT(parsed_expr.source_info().line_offsets(),
testing::UnorderedElementsAre(1, 2, 3));
EXPECT_THAT(
parsed_expr.source_info().positions(),
testing::UnorderedElementsAre(testing::Pair(1, 1), testing::Pair(2, 2)));
}
TEST(AstTest, ListTypeMutableConstruction) {
ListType type;
type.mutable_elem_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.elem_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, MapTypeMutableConstruction) {
MapType type;
type.mutable_key_type() = Type(PrimitiveType::kBool);
type.mutable_value_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.key_type().type_kind()),
PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.value_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, MapTypeComparatorKeyType) {
MapType type;
type.mutable_key_type() = Type(PrimitiveType::kBool);
EXPECT_FALSE(type == MapType());
}
TEST(AstTest, MapTypeComparatorValueType) {
MapType type;
type.mutable_value_type() = Type(PrimitiveType::kBool);
EXPECT_FALSE(type == MapType());
}
TEST(AstTest, FunctionTypeMutableConstruction) {
FunctionType type;
type.mutable_result_type() = Type(PrimitiveType::kBool);
EXPECT_EQ(absl::get<PrimitiveType>(type.result_type().type_kind()),
PrimitiveType::kBool);
}
TEST(AstTest, FunctionTypeComparatorArgTypes) {
FunctionType type;
type.mutable_arg_types().emplace_back(Type());
EXPECT_FALSE(type == FunctionType());
}
TEST(AstTest, CheckedExpr) {
CheckedExpr checked_expr;
auto& expr = checked_expr.mutable_expr();
expr.set_id(1);
expr.mutable_ident_expr().set_name("name");
auto& source_info = checked_expr.mutable_source_info();
source_info.set_syntax_version("syntax_version");
source_info.set_location("location");
source_info.set_line_offsets({1, 2, 3});
source_info.set_positions({{1, 1}, {2, 2}});
checked_expr.set_expr_version("expr_version");
checked_expr.mutable_type_map().insert(
{1, Type(PrimitiveType(PrimitiveType::kBool))});
ASSERT_TRUE(absl::holds_alternative<Ident>(checked_expr.expr().kind()));
ASSERT_EQ(absl::get<Ident>(checked_expr.expr().kind()).name(), "name");
ASSERT_EQ(checked_expr.source_info().syntax_version(), "syntax_version");
ASSERT_EQ(checked_expr.source_info().location(), "location");
EXPECT_THAT(checked_expr.source_info().line_offsets(),
testing::UnorderedElementsAre(1, 2, 3));
EXPECT_THAT(
checked_expr.source_info().positions(),
testing::UnorderedElementsAre(testing::Pair(1, 1), testing::Pair(2, 2)));
EXPECT_EQ(checked_expr.expr_version(), "expr_version");
}
TEST(AstTest, ListTypeDefaults) { EXPECT_EQ(ListType().elem_type(), Type()); }
TEST(AstTest, MapTypeDefaults) {
EXPECT_EQ(MapType().key_type(), Type());
EXPECT_EQ(MapType().value_type(), Type());
}
TEST(AstTest, FunctionTypeDefaults) {
EXPECT_EQ(FunctionType().result_type(), Type());
}
TEST(AstTest, TypeDefaults) {
EXPECT_EQ(Type().null(), nullptr);
EXPECT_EQ(Type().primitive(), PrimitiveType::kPrimitiveTypeUnspecified);
EXPECT_EQ(Type().wrapper(), PrimitiveType::kPrimitiveTypeUnspecified);
EXPECT_EQ(Type().well_known(), WellKnownType::kWellKnownTypeUnspecified);
EXPECT_EQ(Type().list_type(), ListType());
EXPECT_EQ(Type().map_type(), MapType());
EXPECT_EQ(Type().function(), FunctionType());
EXPECT_EQ(Type().message_type(), MessageType());
EXPECT_EQ(Type().type_param(), ParamType());
EXPECT_EQ(Type().type(), Type());
EXPECT_EQ(Type().error_type(), ErrorType());
EXPECT_EQ(Type().abstract_type(), AbstractType());
}
TEST(AstTest, TypeComparatorTest) {
Type type;
type.set_type_kind(std::make_unique<Type>(PrimitiveType::kBool));
EXPECT_FALSE(type.type() == Type());
}
TEST(AstTest, ExprMutableConstruction) {
Expr expr;
expr.mutable_const_expr().set_bool_value(true);
ASSERT_TRUE(expr.has_const_expr());
EXPECT_TRUE(expr.const_expr().bool_value());
expr.mutable_ident_expr().set_name("expr");
ASSERT_TRUE(expr.has_ident_expr());
EXPECT_FALSE(expr.has_const_expr());
EXPECT_EQ(expr.ident_expr().name(), "expr");
expr.mutable_select_expr().set_field("field");
ASSERT_TRUE(expr.has_select_expr());
EXPECT_FALSE(expr.has_ident_expr());
EXPECT_EQ(expr.select_expr().field(), "field");
expr.mutable_call_expr().set_function("function");
ASSERT_TRUE(expr.has_call_expr());
EXPECT_FALSE(expr.has_select_expr());
EXPECT_EQ(expr.call_expr().function(), "function");
expr.mutable_list_expr();
EXPECT_TRUE(expr.has_list_expr());
EXPECT_FALSE(expr.has_call_expr());
expr.mutable_struct_expr().set_name("name");
ASSERT_TRUE(expr.has_struct_expr());
EXPECT_EQ(expr.struct_expr().name(), "name");
EXPECT_FALSE(expr.has_list_expr());
expr.mutable_comprehension_expr().set_accu_var("accu_var");
ASSERT_TRUE(expr.has_comprehension_expr());
EXPECT_FALSE(expr.has_list_expr());
EXPECT_EQ(expr.comprehension_expr().accu_var(), "accu_var");
}
TEST(AstTest, ReferenceConstantDefaultValue) {
Reference reference;
EXPECT_EQ(reference.value(), Constant());
}
TEST(AstTest, TypeCopyable) {
Type type = Type(PrimitiveType::kBool);
Type type2 = type;
EXPECT_TRUE(type2.has_primitive());
EXPECT_EQ(type2, type);
type = Type(ListType(std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
EXPECT_TRUE(type2.has_list_type());
EXPECT_EQ(type2, type);
type = Type(MapType(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
EXPECT_TRUE(type2.has_map_type());
EXPECT_EQ(type2, type);
type = Type(FunctionType(std::make_unique<Type>(PrimitiveType::kBool), {}));
type2 = type;
EXPECT_TRUE(type2.has_function());
EXPECT_EQ(type2, type);
type = Type(AbstractType("optional", {Type(PrimitiveType::kBool)}));
type2 = type;
EXPECT_TRUE(type2.has_abstract_type());
EXPECT_EQ(type2, type);
}
TEST(AstTest, TypeMoveable) {
Type type = Type(PrimitiveType::kBool);
Type type2 = type;
Type type3 = std::move(type);
EXPECT_TRUE(type2.has_primitive());
EXPECT_EQ(type2, type3);
type = Type(ListType(std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_list_type());
EXPECT_EQ(type2, type3);
type = Type(MapType(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool)));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_map_type());
EXPECT_EQ(type2, type3);
type = Type(FunctionType(std::make_unique<Type>(PrimitiveType::kBool), {}));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_function());
EXPECT_EQ(type2, type3);
type = Type(AbstractType("optional", {Type(PrimitiveType::kBool)}));
type2 = type;
type3 = std::move(type);
EXPECT_TRUE(type2.has_abstract_type());
EXPECT_EQ(type2, type3);
}
TEST(AstTest, NestedTypeKindCopyAssignable) {
ListType list_type(std::make_unique<Type>(PrimitiveType::kBool));
ListType list_type2;
list_type2 = list_type;
EXPECT_EQ(list_type2, list_type);
MapType map_type(std::make_unique<Type>(PrimitiveType::kBool),
std::make_unique<Type>(PrimitiveType::kBool));
MapType map_type2;
  map_type2 = map_type;
  EXPECT_EQ(map_type2, map_type);
AbstractType abstract_type(
"abstract", {Type(PrimitiveType::kBool), Type(PrimitiveType::kBool)});
AbstractType abstract_type2;
abstract_type2 = abstract_type;
EXPECT_EQ(abstract_type2, abstract_type);
FunctionType function_type(
std::make_unique<Type>(PrimitiveType::kBool),
{Type(PrimitiveType::kBool), Type(PrimitiveType::kBool)});
FunctionType function_type2;
function_type2 = function_type;
EXPECT_EQ(function_type2, function_type);
}
TEST(AstTest, ExtensionSupported) {
SourceInfo source_info;
source_info.mutable_extensions().push_back(
Extension("constant_folding", nullptr, {}));
EXPECT_EQ(source_info.extensions()[0],
Extension("constant_folding", nullptr, {}));
}
TEST(AstTest, ExtensionEquality) {
Extension extension1("constant_folding", nullptr, {});
EXPECT_EQ(extension1, Extension("constant_folding", nullptr, {}));
EXPECT_NE(extension1,
Extension("constant_folding",
std::make_unique<Extension::Version>(1, 0), {}));
EXPECT_NE(extension1, Extension("constant_folding", nullptr,
{Extension::Component::kRuntime}));
EXPECT_EQ(extension1,
Extension("constant_folding",
std::make_unique<Extension::Version>(0, 0), {}));
}
}
}
} | 5 |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_DECL_H_
#define THIRD_PARTY_CEL_CPP_COMMON_DECL_H_
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/constant.h"
#include "common/type.h"
#include "internal/status_macros.h"
namespace cel {
class VariableDecl;
class OverloadDecl;
class FunctionDecl;
class VariableDecl final {
public:
VariableDecl() = default;
VariableDecl(const VariableDecl&) = default;
VariableDecl(VariableDecl&&) = default;
VariableDecl& operator=(const VariableDecl&) = default;
VariableDecl& operator=(VariableDecl&&) = default;
ABSL_MUST_USE_RESULT const std::string& name() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return name_;
}
void set_name(std::string name) { name_ = std::move(name); }
void set_name(absl::string_view name) {
name_.assign(name.data(), name.size());
}
void set_name(const char* name) { set_name(absl::NullSafeStringView(name)); }
ABSL_MUST_USE_RESULT std::string release_name() {
std::string released;
released.swap(name_);
return released;
}
ABSL_MUST_USE_RESULT const Type& type() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return type_;
}
ABSL_MUST_USE_RESULT Type& mutable_type() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return type_;
}
void set_type(Type type) { mutable_type() = std::move(type); }
ABSL_MUST_USE_RESULT bool has_value() const { return value_.has_value(); }
ABSL_MUST_USE_RESULT const Constant& value() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return has_value() ? *value_ : Constant::default_instance();
}
Constant& mutable_value() ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!has_value()) {
value_.emplace();
}
return *value_;
}
void set_value(absl::optional<Constant> value) { value_ = std::move(value); }
void set_value(Constant value) { mutable_value() = std::move(value); }
ABSL_MUST_USE_RESULT Constant release_value() {
absl::optional<Constant> released;
released.swap(value_);
return std::move(released).value_or(Constant{});
}
private:
std::string name_;
Type type_ = DynType{};
absl::optional<Constant> value_;
};
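// Convenience factories: MakeVariableDecl declares a typed variable, and
// MakeConstantVariableDecl additionally binds a constant value to it.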
inline VariableDecl MakeVariableDecl(std::string name, Type type) {
VariableDecl variable_decl;
variable_decl.set_name(std::move(name));
variable_decl.set_type(std::move(type));
return variable_decl;
}
inline VariableDecl MakeConstantVariableDecl(std::string name, Type type,
Constant value) {
VariableDecl variable_decl;
variable_decl.set_name(std::move(name));
variable_decl.set_type(std::move(type));
variable_decl.set_value(std::move(value));
return variable_decl;
}
inline bool operator==(const VariableDecl& lhs, const VariableDecl& rhs) {
return lhs.name() == rhs.name() && lhs.type() == rhs.type() &&
lhs.has_value() == rhs.has_value() && lhs.value() == rhs.value();
}
inline bool operator!=(const VariableDecl& lhs, const VariableDecl& rhs) {
return !operator==(lhs, rhs);
}
class OverloadDecl final {
public:
OverloadDecl() = default;
OverloadDecl(const OverloadDecl&) = default;
OverloadDecl(OverloadDecl&&) = default;
OverloadDecl& operator=(const OverloadDecl&) = default;
OverloadDecl& operator=(OverloadDecl&&) = default;
ABSL_MUST_USE_RESULT const std::string& id() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return id_;
}
void set_id(std::string id) { id_ = std::move(id); }
void set_id(absl::string_view id) { id_.assign(id.data(), id.size()); }
void set_id(const char* id) { set_id(absl::NullSafeStringView(id)); }
ABSL_MUST_USE_RESULT std::string release_id() {
std::string released;
released.swap(id_);
return released;
}
ABSL_MUST_USE_RESULT const std::vector<Type>& args() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return args_;
}
ABSL_MUST_USE_RESULT std::vector<Type>& mutable_args()
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return args_;
}
ABSL_MUST_USE_RESULT std::vector<Type> release_args() {
std::vector<Type> released;
released.swap(mutable_args());
return released;
}
ABSL_MUST_USE_RESULT const Type& result() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return result_;
}
ABSL_MUST_USE_RESULT Type& mutable_result() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return result_;
}
void set_result(Type result) { mutable_result() = std::move(result); }
ABSL_MUST_USE_RESULT bool member() const { return member_; }
void set_member(bool member) { member_ = member; }
absl::flat_hash_set<std::string> GetTypeParams() const;
private:
std::string id_;
std::vector<Type> args_;
Type result_ = DynType{};
bool member_ = false;
};
inline bool operator==(const OverloadDecl& lhs, const OverloadDecl& rhs) {
return lhs.id() == rhs.id() && absl::c_equal(lhs.args(), rhs.args()) &&
lhs.result() == rhs.result() && lhs.member() == rhs.member();
}
inline bool operator!=(const OverloadDecl& lhs, const OverloadDecl& rhs) {
return !operator==(lhs, rhs);
}
template <typename... Args>
OverloadDecl MakeOverloadDecl(std::string id, Type result, Args&&... args) {
OverloadDecl overload_decl;
overload_decl.set_id(std::move(id));
overload_decl.set_result(std::move(result));
overload_decl.set_member(false);
auto& mutable_args = overload_decl.mutable_args();
mutable_args.reserve(sizeof...(Args));
(mutable_args.push_back(std::forward<Args>(args)), ...);
return overload_decl;
}
template <typename... Args>
OverloadDecl MakeMemberOverloadDecl(std::string id, Type result,
Args&&... args) {
OverloadDecl overload_decl;
overload_decl.set_id(std::move(id));
overload_decl.set_result(std::move(result));
overload_decl.set_member(true);
auto& mutable_args = overload_decl.mutable_args();
mutable_args.reserve(sizeof...(Args));
(mutable_args.push_back(std::forward<Args>(args)), ...);
return overload_decl;
}
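// Usage sketch (identifiers assumed, mirroring the tests below): a global
// overload and a member overload, each from string to string:
//
//   auto foo = MakeOverloadDecl("foo", StringType{}, StringType{});
//   auto bar = MakeMemberOverloadDecl("bar", StringType{}, StringType{});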
struct OverloadDeclHash {
using is_transparent = void;
size_t operator()(const OverloadDecl& overload_decl) const {
return (*this)(overload_decl.id());
}
size_t operator()(absl::string_view id) const { return absl::HashOf(id); }
};
struct OverloadDeclEqualTo {
using is_transparent = void;
bool operator()(const OverloadDecl& lhs, const OverloadDecl& rhs) const {
return (*this)(lhs.id(), rhs.id());
}
bool operator()(const OverloadDecl& lhs, absl::string_view rhs) const {
return (*this)(lhs.id(), rhs);
}
bool operator()(absl::string_view lhs, const OverloadDecl& rhs) const {
return (*this)(lhs, rhs.id());
}
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs;
}
};
using OverloadDeclHashSet =
absl::flat_hash_set<OverloadDecl, OverloadDeclHash, OverloadDeclEqualTo>;
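// Both functors are transparent, so an overload can be looked up by id alone,
// e.g. overloads.find("foo"), without materializing an OverloadDecl.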
template <typename... Overloads>
absl::StatusOr<FunctionDecl> MakeFunctionDecl(std::string name,
Overloads&&... overloads);
class FunctionDecl final {
public:
FunctionDecl() = default;
FunctionDecl(const FunctionDecl&) = default;
FunctionDecl(FunctionDecl&&) = default;
FunctionDecl& operator=(const FunctionDecl&) = default;
FunctionDecl& operator=(FunctionDecl&&) = default;
ABSL_MUST_USE_RESULT const std::string& name() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return name_;
}
void set_name(std::string name) { name_ = std::move(name); }
void set_name(absl::string_view name) {
name_.assign(name.data(), name.size());
}
void set_name(const char* name) { set_name(absl::NullSafeStringView(name)); }
ABSL_MUST_USE_RESULT std::string release_name() {
std::string released;
released.swap(name_);
return released;
}
absl::Status AddOverload(const OverloadDecl& overload) {
absl::Status status;
AddOverloadImpl(overload, status);
return status;
}
absl::Status AddOverload(OverloadDecl&& overload) {
absl::Status status;
AddOverloadImpl(std::move(overload), status);
return status;
}
ABSL_MUST_USE_RESULT const OverloadDeclHashSet& overloads() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
return overloads_;
}
ABSL_MUST_USE_RESULT OverloadDeclHashSet release_overloads() {
OverloadDeclHashSet released;
released.swap(overloads_);
return released;
}
private:
template <typename... Overloads>
friend absl::StatusOr<FunctionDecl> MakeFunctionDecl(
std::string name, Overloads&&... overloads);
void AddOverloadImpl(const OverloadDecl& overload, absl::Status& status);
void AddOverloadImpl(OverloadDecl&& overload, absl::Status& status);
std::string name_;
OverloadDeclHashSet overloads_;
};
inline bool operator==(const FunctionDecl& lhs, const FunctionDecl& rhs) {
return lhs.name() == rhs.name() &&
absl::c_equal(lhs.overloads(), rhs.overloads());
}
inline bool operator!=(const FunctionDecl& lhs, const FunctionDecl& rhs) {
return !operator==(lhs, rhs);
}
template <typename... Overloads>
absl::StatusOr<FunctionDecl> MakeFunctionDecl(std::string name,
Overloads&&... overloads) {
FunctionDecl function_decl;
function_decl.set_name(std::move(name));
function_decl.overloads_.reserve(sizeof...(Overloads));
absl::Status status;
(function_decl.AddOverloadImpl(std::forward<Overloads>(overloads), status),
...);
CEL_RETURN_IF_ERROR(status);
return function_decl;
}
namespace common_internal {
bool TypeIsAssignable(TypeView to, TypeView from);
}
}
#endif
#include "common/decl.h"
#include <cstddef>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "common/casting.h"
#include "common/type.h"
#include "common/type_kind.h"
namespace cel {
namespace common_internal {
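// `from` is assignable to `to` when the types are identical, `to` is dyn, `to`
// is a wrapper type admitting null or its wrapped primitive, or both share the
// same kind and name with pairwise-assignable parameters.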
bool TypeIsAssignable(TypeView to, TypeView from) {
if (to == from) {
return true;
}
const auto to_kind = to.kind();
if (to_kind == TypeKind::kDyn) {
return true;
}
switch (to_kind) {
case TypeKind::kBoolWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(BoolTypeView{}, from);
case TypeKind::kIntWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(IntTypeView{}, from);
case TypeKind::kUintWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(UintTypeView{}, from);
case TypeKind::kDoubleWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(DoubleTypeView{}, from);
case TypeKind::kBytesWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(BytesTypeView{}, from);
case TypeKind::kStringWrapper:
return TypeIsAssignable(NullTypeView{}, from) ||
TypeIsAssignable(StringTypeView{}, from);
default:
break;
}
const auto from_kind = from.kind();
if (to_kind != from_kind || to.name() != from.name()) {
return false;
}
const auto& to_params = to.parameters();
const auto& from_params = from.parameters();
const auto params_size = to_params.size();
if (params_size != from_params.size()) {
return false;
}
for (size_t i = 0; i < params_size; ++i) {
if (!TypeIsAssignable(to_params[i], from_params[i])) {
return false;
}
}
return true;
}
}
namespace {
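// Two overloads collide when they agree on receiver style (member vs. global)
// and arity, and every argument pair is assignable in at least one direction.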
bool SignaturesOverlap(const OverloadDecl& lhs, const OverloadDecl& rhs) {
if (lhs.member() != rhs.member()) {
return false;
}
const auto& lhs_args = lhs.args();
const auto& rhs_args = rhs.args();
const auto args_size = lhs_args.size();
if (args_size != rhs_args.size()) {
return false;
}
bool args_overlap = true;
for (size_t i = 0; i < args_size; ++i) {
args_overlap =
args_overlap &&
(common_internal::TypeIsAssignable(lhs_args[i], rhs_args[i]) ||
common_internal::TypeIsAssignable(rhs_args[i], lhs_args[i]));
}
return args_overlap;
}
template <typename Overload>
void AddOverloadInternal(OverloadDeclHashSet& overloads, Overload&& overload,
absl::Status& status) {
if (!status.ok()) {
return;
}
if (auto it = overloads.find(overload.id()); it != overloads.end()) {
status = absl::AlreadyExistsError(
absl::StrCat("overload already exists: ", overload.id()));
return;
}
for (const auto& existing : overloads) {
if (SignaturesOverlap(overload, existing)) {
status = absl::InvalidArgumentError(
absl::StrCat("overload signature collision: ", existing.id(),
" collides with ", overload.id()));
return;
}
}
const auto inserted =
overloads.insert(std::forward<Overload>(overload)).second;
ABSL_DCHECK(inserted);
}
void CollectTypeParams(absl::flat_hash_set<std::string>& type_params,
TypeView type) {
const auto kind = type.kind();
switch (kind) {
case TypeKind::kList: {
const auto& list_type = cel::Cast<ListTypeView>(type);
CollectTypeParams(type_params, list_type.element());
} break;
case TypeKind::kMap: {
const auto& map_type = cel::Cast<MapTypeView>(type);
CollectTypeParams(type_params, map_type.key());
CollectTypeParams(type_params, map_type.value());
} break;
case TypeKind::kOpaque: {
const auto& opaque_type = cel::Cast<OpaqueTypeView>(type);
for (const auto& param : opaque_type.parameters()) {
CollectTypeParams(type_params, param);
}
} break;
case TypeKind::kFunction: {
const auto& function_type = cel::Cast<FunctionTypeView>(type);
CollectTypeParams(type_params, function_type.result());
for (const auto& arg : function_type.args()) {
CollectTypeParams(type_params, arg);
}
} break;
case TypeKind::kTypeParam:
type_params.emplace(cel::Cast<TypeParamTypeView>(type).name());
break;
default:
break;
}
}
}
absl::flat_hash_set<std::string> OverloadDecl::GetTypeParams() const {
absl::flat_hash_set<std::string> type_params;
CollectTypeParams(type_params, result());
for (const auto& arg : args()) {
CollectTypeParams(type_params, arg);
}
return type_params;
}
void FunctionDecl::AddOverloadImpl(const OverloadDecl& overload,
absl::Status& status) {
AddOverloadInternal(overloads_, overload, status);
}
void FunctionDecl::AddOverloadImpl(OverloadDecl&& overload,
absl::Status& status) {
AddOverloadInternal(overloads_, std::move(overload), status);
}
} | #include "common/decl.h"
#include "absl/status/status.h"
#include "common/constant.h"
#include "common/memory.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::ElementsAre;
using testing::IsEmpty;
using testing::UnorderedElementsAre;
using cel::internal::StatusIs;
TEST(VariableDecl, Name) {
VariableDecl variable_decl;
EXPECT_THAT(variable_decl.name(), IsEmpty());
variable_decl.set_name("foo");
EXPECT_EQ(variable_decl.name(), "foo");
EXPECT_EQ(variable_decl.release_name(), "foo");
EXPECT_THAT(variable_decl.name(), IsEmpty());
}
TEST(VariableDecl, Type) {
VariableDecl variable_decl;
EXPECT_EQ(variable_decl.type(), DynType{});
variable_decl.set_type(StringType{});
EXPECT_EQ(variable_decl.type(), StringType{});
}
TEST(VariableDecl, Value) {
VariableDecl variable_decl;
EXPECT_FALSE(variable_decl.has_value());
EXPECT_EQ(variable_decl.value(), Constant{});
Constant value;
value.set_bool_value(true);
variable_decl.set_value(value);
EXPECT_TRUE(variable_decl.has_value());
EXPECT_EQ(variable_decl.value(), value);
EXPECT_EQ(variable_decl.release_value(), value);
EXPECT_EQ(variable_decl.value(), Constant{});
}
Constant MakeBoolConstant(bool value) {
Constant constant;
constant.set_bool_value(value);
return constant;
}
TEST(VariableDecl, Equality) {
VariableDecl variable_decl;
EXPECT_EQ(variable_decl, VariableDecl{});
variable_decl.mutable_value().set_bool_value(true);
EXPECT_NE(variable_decl, VariableDecl{});
  EXPECT_EQ(MakeVariableDecl("foo", StringType{}),
            MakeVariableDecl("foo", StringType{}));
  EXPECT_EQ(
      MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true)),
      MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true)));
}
TEST(OverloadDecl, Id) {
OverloadDecl overload_decl;
EXPECT_THAT(overload_decl.id(), IsEmpty());
overload_decl.set_id("foo");
EXPECT_EQ(overload_decl.id(), "foo");
EXPECT_EQ(overload_decl.release_id(), "foo");
EXPECT_THAT(overload_decl.id(), IsEmpty());
}
TEST(OverloadDecl, Result) {
OverloadDecl overload_decl;
EXPECT_EQ(overload_decl.result(), DynType{});
overload_decl.set_result(StringType{});
EXPECT_EQ(overload_decl.result(), StringType{});
}
TEST(OverloadDecl, Args) {
OverloadDecl overload_decl;
EXPECT_THAT(overload_decl.args(), IsEmpty());
overload_decl.mutable_args().push_back(StringType{});
EXPECT_THAT(overload_decl.args(), ElementsAre(StringType{}));
EXPECT_THAT(overload_decl.release_args(), ElementsAre(StringType{}));
EXPECT_THAT(overload_decl.args(), IsEmpty());
}
TEST(OverloadDecl, Member) {
OverloadDecl overload_decl;
EXPECT_FALSE(overload_decl.member());
overload_decl.set_member(true);
EXPECT_TRUE(overload_decl.member());
}
TEST(OverloadDecl, Equality) {
OverloadDecl overload_decl;
EXPECT_EQ(overload_decl, OverloadDecl{});
overload_decl.set_member(true);
EXPECT_NE(overload_decl, OverloadDecl{});
}
TEST(OverloadDecl, GetTypeParams) {
auto memory_manager = MemoryManagerRef::ReferenceCounting();
auto overload_decl = MakeOverloadDecl(
"foo", ListType(memory_manager, TypeParamType(memory_manager, "A")),
MapType(memory_manager, TypeParamType(memory_manager, "B"),
TypeParamType(memory_manager, "C")),
OpaqueType(memory_manager, "bar",
{FunctionType(memory_manager,
TypeParamType(memory_manager, "D"), {})}));
EXPECT_THAT(overload_decl.GetTypeParams(),
UnorderedElementsAre("A", "B", "C", "D"));
}
TEST(FunctionDecl, Name) {
FunctionDecl function_decl;
EXPECT_THAT(function_decl.name(), IsEmpty());
function_decl.set_name("foo");
EXPECT_EQ(function_decl.name(), "foo");
EXPECT_EQ(function_decl.release_name(), "foo");
EXPECT_THAT(function_decl.name(), IsEmpty());
}
TEST(FunctionDecl, Overloads) {
ASSERT_OK_AND_ASSIGN(
auto function_decl,
MakeFunctionDecl(
"hello", MakeOverloadDecl("foo", StringType{}, StringType{}),
MakeMemberOverloadDecl("bar", StringType{}, StringType{})));
EXPECT_THAT(function_decl.AddOverload(
MakeOverloadDecl("baz", DynType{}, StringType{})),
StatusIs(absl::StatusCode::kInvalidArgument));
}
using common_internal::TypeIsAssignable;
TEST(TypeIsAssignable, BoolWrapper) {
EXPECT_TRUE(TypeIsAssignable(BoolWrapperTypeView{}, BoolWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(BoolWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(BoolWrapperTypeView{}, BoolTypeView{}));
EXPECT_FALSE(TypeIsAssignable(BoolWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, IntWrapper) {
EXPECT_TRUE(TypeIsAssignable(IntWrapperTypeView{}, IntWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(IntWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(IntWrapperTypeView{}, IntTypeView{}));
EXPECT_FALSE(TypeIsAssignable(IntWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, UintWrapper) {
EXPECT_TRUE(TypeIsAssignable(UintWrapperTypeView{}, UintWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(UintWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(UintWrapperTypeView{}, UintTypeView{}));
EXPECT_FALSE(TypeIsAssignable(UintWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, DoubleWrapper) {
EXPECT_TRUE(
TypeIsAssignable(DoubleWrapperTypeView{}, DoubleWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(DoubleWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(DoubleWrapperTypeView{}, DoubleTypeView{}));
EXPECT_FALSE(TypeIsAssignable(DoubleWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, BytesWrapper) {
EXPECT_TRUE(TypeIsAssignable(BytesWrapperTypeView{}, BytesWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(BytesWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(BytesWrapperTypeView{}, BytesTypeView{}));
EXPECT_FALSE(TypeIsAssignable(BytesWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, StringWrapper) {
EXPECT_TRUE(
TypeIsAssignable(StringWrapperTypeView{}, StringWrapperTypeView{}));
EXPECT_TRUE(TypeIsAssignable(StringWrapperTypeView{}, NullTypeView{}));
EXPECT_TRUE(TypeIsAssignable(StringWrapperTypeView{}, StringTypeView{}));
EXPECT_FALSE(TypeIsAssignable(StringWrapperTypeView{}, DurationTypeView{}));
}
TEST(TypeIsAssignable, Complex) {
auto memory_manager = MemoryManagerRef::ReferenceCounting();
EXPECT_TRUE(TypeIsAssignable(OptionalType(memory_manager, DynTypeView{}),
OptionalType(memory_manager, StringTypeView{})));
EXPECT_FALSE(
TypeIsAssignable(OptionalType(memory_manager, BoolTypeView{}),
OptionalType(memory_manager, StringTypeView{})));
}
}
} | 6 |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPE_FACTORY_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPE_FACTORY_H_
#include "absl/strings/string_view.h"
#include "common/memory.h"
#include "common/sized_input_view.h"
#include "common/type.h"
namespace cel {
namespace common_internal {
class PiecewiseValueManager;
}
class TypeFactory {
public:
virtual ~TypeFactory() = default;
virtual MemoryManagerRef GetMemoryManager() const = 0;
ListType CreateListType(TypeView element);
MapType CreateMapType(TypeView key, TypeView value);
StructType CreateStructType(absl::string_view name);
OpaqueType CreateOpaqueType(absl::string_view name,
const SizedInputView<TypeView>& parameters);
OptionalType CreateOptionalType(TypeView parameter);
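  // The accessors below return views of process-wide cached types, so they do
  // not allocate.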
ListTypeView GetDynListType();
MapTypeView GetDynDynMapType();
MapTypeView GetStringDynMapType();
OptionalTypeView GetDynOptionalType();
NullType GetNullType() { return NullType{}; }
ErrorType GetErrorType() { return ErrorType{}; }
DynType GetDynType() { return DynType{}; }
AnyType GetAnyType() { return AnyType{}; }
BoolType GetBoolType() { return BoolType{}; }
IntType GetIntType() { return IntType{}; }
UintType GetUintType() { return UintType{}; }
DoubleType GetDoubleType() { return DoubleType{}; }
StringType GetStringType() { return StringType{}; }
BytesType GetBytesType() { return BytesType{}; }
DurationType GetDurationType() { return DurationType{}; }
TimestampType GetTimestampType() { return TimestampType{}; }
TypeType GetTypeType() { return TypeType{}; }
UnknownType GetUnknownType() { return UnknownType{}; }
BoolWrapperType GetBoolWrapperType() { return BoolWrapperType{}; }
BytesWrapperType GetBytesWrapperType() { return BytesWrapperType{}; }
DoubleWrapperType GetDoubleWrapperType() { return DoubleWrapperType{}; }
IntWrapperType GetIntWrapperType() { return IntWrapperType{}; }
StringWrapperType GetStringWrapperType() { return StringWrapperType{}; }
UintWrapperType GetUintWrapperType() { return UintWrapperType{}; }
Type GetJsonValueType() { return DynType{}; }
ListType GetJsonListType() { return ListType(GetDynListType()); }
MapType GetJsonMapType() { return MapType(GetStringDynMapType()); }
protected:
friend class common_internal::PiecewiseValueManager;
virtual ListType CreateListTypeImpl(TypeView element) = 0;
virtual MapType CreateMapTypeImpl(TypeView key, TypeView value) = 0;
virtual StructType CreateStructTypeImpl(absl::string_view name) = 0;
virtual OpaqueType CreateOpaqueTypeImpl(
absl::string_view name, const SizedInputView<TypeView>& parameters) = 0;
};
}
#endif
#include "common/type_factory.h"
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/sized_input_view.h"
#include "common/type.h"
#include "common/type_kind.h"
#include "common/types/type_cache.h"
#include "internal/names.h"
namespace cel {
namespace {
using common_internal::ListTypeCacheMap;
using common_internal::MapTypeCacheMap;
using common_internal::OpaqueTypeCacheMap;
using common_internal::ProcessLocalTypeCache;
using common_internal::StructTypeCacheMap;
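// CEL limits map keys to bool, int, uint, and string; dyn and error are also
// accepted so a dynamic or already-errored key does not trip the DCHECK in
// CreateMapType.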
bool IsValidMapKeyType(TypeView type) {
switch (type.kind()) {
case TypeKind::kDyn:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kError:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kBool:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kInt:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kUint:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kString:
return true;
default:
return false;
}
}
}
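// List, map, and opaque type creation consults the process-local cache of
// common type shapes first; only cache misses reach the virtual Create*Impl
// methods. Struct types always go through CreateStructTypeImpl.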
ListType TypeFactory::CreateListType(TypeView element) {
if (auto list_type = ProcessLocalTypeCache::Get()->FindListType(element);
list_type.has_value()) {
return ListType(*list_type);
}
return CreateListTypeImpl(element);
}
MapType TypeFactory::CreateMapType(TypeView key, TypeView value) {
ABSL_DCHECK(IsValidMapKeyType(key)) << key;
if (auto map_type = ProcessLocalTypeCache::Get()->FindMapType(key, value);
map_type.has_value()) {
return MapType(*map_type);
}
return CreateMapTypeImpl(key, value);
}
StructType TypeFactory::CreateStructType(absl::string_view name) {
ABSL_DCHECK(internal::IsValidRelativeName(name)) << name;
return CreateStructTypeImpl(name);
}
OpaqueType TypeFactory::CreateOpaqueType(
absl::string_view name, const SizedInputView<TypeView>& parameters) {
ABSL_DCHECK(internal::IsValidRelativeName(name)) << name;
if (auto opaque_type =
ProcessLocalTypeCache::Get()->FindOpaqueType(name, parameters);
opaque_type.has_value()) {
return OpaqueType(*opaque_type);
}
return CreateOpaqueTypeImpl(name, parameters);
}
OptionalType TypeFactory::CreateOptionalType(TypeView parameter) {
return Cast<OptionalType>(CreateOpaqueType(OptionalType::kName, {parameter}));
}
ListTypeView TypeFactory::GetDynListType() {
return ProcessLocalTypeCache::Get()->GetDynListType();
}
MapTypeView TypeFactory::GetDynDynMapType() {
return ProcessLocalTypeCache::Get()->GetDynDynMapType();
}
MapTypeView TypeFactory::GetStringDynMapType() {
return ProcessLocalTypeCache::Get()->GetStringDynMapType();
}
OptionalTypeView TypeFactory::GetDynOptionalType() {
return ProcessLocalTypeCache::Get()->GetDynOptionalType();
}
} | #include "common/type_factory.h"
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include "absl/types/optional.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/type.h"
#include "common/type_introspector.h"
#include "common/type_manager.h"
#include "common/types/type_cache.h"
#include "internal/testing.h"
namespace cel {
namespace {
using common_internal::ProcessLocalTypeCache;
using testing::_;
using testing::Eq;
using testing::Ne;
using testing::TestParamInfo;
using testing::TestWithParam;
enum class ThreadSafety {
kCompatible,
kSafe,
};
std::ostream& operator<<(std::ostream& out, ThreadSafety thread_safety) {
switch (thread_safety) {
    case ThreadSafety::kCompatible:
      return out << "THREAD_COMPATIBLE";
    case ThreadSafety::kSafe:
      return out << "THREAD_SAFE";
}
}
class TypeFactoryTest
: public common_internal::ThreadCompatibleMemoryTest<ThreadSafety> {
public:
void SetUp() override {
ThreadCompatibleMemoryTest::SetUp();
switch (thread_safety()) {
case ThreadSafety::kCompatible:
type_manager_ = NewThreadCompatibleTypeManager(
memory_manager(),
NewThreadCompatibleTypeIntrospector(memory_manager()));
break;
case ThreadSafety::kSafe:
type_manager_ = NewThreadSafeTypeManager(
memory_manager(), NewThreadSafeTypeIntrospector(memory_manager()));
break;
}
}
void TearDown() override { Finish(); }
void Finish() {
type_manager_.reset();
ThreadCompatibleMemoryTest::Finish();
}
TypeFactory& type_factory() const { return **type_manager_; }
ThreadSafety thread_safety() const { return std::get<1>(GetParam()); }
static std::string ToString(
TestParamInfo<std::tuple<MemoryManagement, ThreadSafety>> param) {
std::ostringstream out;
out << std::get<0>(param.param) << "_" << std::get<1>(param.param);
return out.str();
}
private:
absl::optional<Shared<TypeManager>> type_manager_;
};
TEST_P(TypeFactoryTest, ListType) {
auto list_type1 = type_factory().CreateListType(StringType());
EXPECT_THAT(type_factory().CreateListType(StringType()), Eq(list_type1));
EXPECT_THAT(type_factory().CreateListType(BytesType()), Ne(list_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto list_type2 = type_factory().CreateListType(struct_type1);
EXPECT_THAT(type_factory().CreateListType(struct_type1), Eq(list_type2));
EXPECT_THAT(type_factory().CreateListType(struct_type2), Ne(list_type2));
EXPECT_EQ(type_factory().GetDynListType(),
ProcessLocalTypeCache::Get()->GetDynListType());
}
TEST_P(TypeFactoryTest, MapType) {
auto map_type1 = type_factory().CreateMapType(StringType(), BytesType());
EXPECT_THAT(type_factory().CreateMapType(StringType(), BytesType()),
Eq(map_type1));
EXPECT_THAT(type_factory().CreateMapType(StringType(), StringType()),
Ne(map_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto map_type2 = type_factory().CreateMapType(StringType(), struct_type1);
EXPECT_THAT(type_factory().CreateMapType(StringType(), struct_type1),
Eq(map_type2));
EXPECT_THAT(type_factory().CreateMapType(StringType(), struct_type2),
Ne(map_type2));
EXPECT_EQ(type_factory().GetDynDynMapType(),
ProcessLocalTypeCache::Get()->GetDynDynMapType());
EXPECT_EQ(type_factory().GetStringDynMapType(),
ProcessLocalTypeCache::Get()->GetStringDynMapType());
}
TEST_P(TypeFactoryTest, MapTypeInvalidKeyType) {
EXPECT_DEBUG_DEATH(type_factory().CreateMapType(DoubleType(), BytesType()),
_);
}
TEST_P(TypeFactoryTest, StructType) {
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
EXPECT_THAT(type_factory().CreateStructType("test.Struct1"),
Eq(struct_type1));
EXPECT_THAT(type_factory().CreateStructType("test.Struct2"),
Ne(struct_type1));
}
TEST_P(TypeFactoryTest, StructTypeBadName) {
EXPECT_DEBUG_DEATH(type_factory().CreateStructType("test.~"), _);
}
TEST_P(TypeFactoryTest, OpaqueType) {
auto opaque_type1 =
type_factory().CreateOpaqueType("test.Struct1", {BytesType()});
EXPECT_THAT(type_factory().CreateOpaqueType("test.Struct1", {BytesType()}),
Eq(opaque_type1));
EXPECT_THAT(type_factory().CreateOpaqueType("test.Struct2", {}),
Ne(opaque_type1));
}
TEST_P(TypeFactoryTest, OpaqueTypeBadName) {
EXPECT_DEBUG_DEATH(type_factory().CreateOpaqueType("test.~", {}), _);
}
TEST_P(TypeFactoryTest, OptionalType) {
auto optional_type1 = type_factory().CreateOptionalType(StringType());
EXPECT_THAT(type_factory().CreateOptionalType(StringType()),
Eq(optional_type1));
EXPECT_THAT(type_factory().CreateOptionalType(BytesType()),
Ne(optional_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto optional_type2 = type_factory().CreateOptionalType(struct_type1);
EXPECT_THAT(type_factory().CreateOptionalType(struct_type1),
Eq(optional_type2));
EXPECT_THAT(type_factory().CreateOptionalType(struct_type2),
Ne(optional_type2));
EXPECT_EQ(type_factory().GetDynOptionalType(),
ProcessLocalTypeCache::Get()->GetDynOptionalType());
}
INSTANTIATE_TEST_SUITE_P(
TypeFactoryTest, TypeFactoryTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
::testing::Values(ThreadSafety::kCompatible,
ThreadSafety::kSafe)),
TypeFactoryTest::ToString);
}
} | 7 |
#ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_AST_TRAVERSE_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_AST_TRAVERSE_H_
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/ast_visitor.h"
namespace google::api::expr::runtime {
struct TraversalOptions {
bool use_comprehension_callbacks;
TraversalOptions() : use_comprehension_callbacks(false) {}
};
void AstTraverse(const google::api::expr::v1alpha1::Expr* expr,
const google::api::expr::v1alpha1::SourceInfo* source_info,
AstVisitor* visitor,
TraversalOptions options = TraversalOptions());
}
#endif
#include "eval/public/ast_traverse.h"
#include <stack>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/log/absl_log.h"
#include "absl/types/variant.h"
#include "eval/public/ast_visitor.h"
#include "eval/public/source_position.h"
namespace google::api::expr::runtime {
using google::api::expr::v1alpha1::Expr;
using google::api::expr::v1alpha1::SourceInfo;
using Ident = google::api::expr::v1alpha1::Expr::Ident;
using Select = google::api::expr::v1alpha1::Expr::Select;
using Call = google::api::expr::v1alpha1::Expr::Call;
using CreateList = google::api::expr::v1alpha1::Expr::CreateList;
using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct;
using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension;
namespace {
struct ArgRecord {
const Expr* expr;
const SourceInfo* source_info;
const Expr* calling_expr;
int call_arg;
};
struct ComprehensionRecord {
const Expr* expr;
const SourceInfo* source_info;
const Comprehension* comprehension;
const Expr* comprehension_expr;
ComprehensionArg comprehension_arg;
bool use_comprehension_callbacks;
};
struct ExprRecord {
const Expr* expr;
const SourceInfo* source_info;
};
using StackRecordKind =
absl::variant<ExprRecord, ArgRecord, ComprehensionRecord>;
struct StackRecord {
public:
ABSL_ATTRIBUTE_UNUSED static constexpr int kNotCallArg = -1;
static constexpr int kTarget = -2;
StackRecord(const Expr* e, const SourceInfo* info) {
ExprRecord record;
record.expr = e;
record.source_info = info;
record_variant = record;
}
StackRecord(const Expr* e, const SourceInfo* info,
const Comprehension* comprehension,
const Expr* comprehension_expr,
ComprehensionArg comprehension_arg,
bool use_comprehension_callbacks) {
if (use_comprehension_callbacks) {
ComprehensionRecord record;
record.expr = e;
record.source_info = info;
record.comprehension = comprehension;
record.comprehension_expr = comprehension_expr;
record.comprehension_arg = comprehension_arg;
record.use_comprehension_callbacks = use_comprehension_callbacks;
record_variant = record;
return;
}
ArgRecord record;
record.expr = e;
record.source_info = info;
record.calling_expr = comprehension_expr;
record.call_arg = comprehension_arg;
record_variant = record;
}
StackRecord(const Expr* e, const SourceInfo* info, const Expr* call,
int argnum) {
ArgRecord record;
record.expr = e;
record.source_info = info;
record.calling_expr = call;
record.call_arg = argnum;
record_variant = record;
}
StackRecordKind record_variant;
bool visited = false;
};
struct PreVisitor {
void operator()(const ExprRecord& record) {
const Expr* expr = record.expr;
const SourcePosition position(expr->id(), record.source_info);
visitor->PreVisitExpr(expr, &position);
switch (expr->expr_kind_case()) {
case Expr::kConstExpr:
visitor->PreVisitConst(&expr->const_expr(), expr, &position);
break;
case Expr::kIdentExpr:
visitor->PreVisitIdent(&expr->ident_expr(), expr, &position);
break;
case Expr::kSelectExpr:
visitor->PreVisitSelect(&expr->select_expr(), expr, &position);
break;
case Expr::kCallExpr:
visitor->PreVisitCall(&expr->call_expr(), expr, &position);
break;
case Expr::kListExpr:
visitor->PreVisitCreateList(&expr->list_expr(), expr, &position);
break;
case Expr::kStructExpr:
visitor->PreVisitCreateStruct(&expr->struct_expr(), expr, &position);
break;
case Expr::kComprehensionExpr:
visitor->PreVisitComprehension(&expr->comprehension_expr(), expr,
&position);
break;
default:
break;
}
}
void operator()(const ArgRecord&) {}
void operator()(const ComprehensionRecord& record) {
const Expr* expr = record.expr;
const SourcePosition position(expr->id(), record.source_info);
visitor->PreVisitComprehensionSubexpression(
expr, record.comprehension, record.comprehension_arg, &position);
}
AstVisitor* visitor;
};
void PreVisit(const StackRecord& record, AstVisitor* visitor) {
absl::visit(PreVisitor{visitor}, record.record_variant);
}
struct PostVisitor {
void operator()(const ExprRecord& record) {
const Expr* expr = record.expr;
const SourcePosition position(expr->id(), record.source_info);
switch (expr->expr_kind_case()) {
case Expr::kConstExpr:
visitor->PostVisitConst(&expr->const_expr(), expr, &position);
break;
case Expr::kIdentExpr:
visitor->PostVisitIdent(&expr->ident_expr(), expr, &position);
break;
case Expr::kSelectExpr:
visitor->PostVisitSelect(&expr->select_expr(), expr, &position);
break;
case Expr::kCallExpr:
visitor->PostVisitCall(&expr->call_expr(), expr, &position);
break;
case Expr::kListExpr:
visitor->PostVisitCreateList(&expr->list_expr(), expr, &position);
break;
case Expr::kStructExpr:
visitor->PostVisitCreateStruct(&expr->struct_expr(), expr, &position);
break;
case Expr::kComprehensionExpr:
visitor->PostVisitComprehension(&expr->comprehension_expr(), expr,
&position);
break;
default:
ABSL_LOG(ERROR) << "Unsupported Expr kind: " << expr->expr_kind_case();
}
visitor->PostVisitExpr(expr, &position);
}
void operator()(const ArgRecord& record) {
const Expr* expr = record.expr;
const SourcePosition position(expr->id(), record.source_info);
if (record.call_arg == StackRecord::kTarget) {
visitor->PostVisitTarget(record.calling_expr, &position);
} else {
visitor->PostVisitArg(record.call_arg, record.calling_expr, &position);
}
}
void operator()(const ComprehensionRecord& record) {
const Expr* expr = record.expr;
const SourcePosition position(expr->id(), record.source_info);
visitor->PostVisitComprehensionSubexpression(
expr, record.comprehension, record.comprehension_arg, &position);
}
AstVisitor* visitor;
};
void PostVisit(const StackRecord& record, AstVisitor* visitor) {
absl::visit(PostVisitor{visitor}, record.record_variant);
}
void PushSelectDeps(const Select* select_expr, const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
if (select_expr->has_operand()) {
stack->push(StackRecord(&select_expr->operand(), source_info));
}
}
void PushCallDeps(const Call* call_expr, const Expr* expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const int arg_size = call_expr->args_size();
for (int i = arg_size - 1; i >= 0; --i) {
stack->push(StackRecord(&call_expr->args(i), source_info, expr, i));
}
if (call_expr->has_target()) {
stack->push(StackRecord(&call_expr->target(), source_info, expr,
StackRecord::kTarget));
}
}
void PushListDeps(const CreateList* list_expr, const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const auto& elements = list_expr->elements();
for (auto it = elements.rbegin(); it != elements.rend(); ++it) {
const auto& element = *it;
stack->push(StackRecord(&element, source_info));
}
}
void PushStructDeps(const CreateStruct* struct_expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const auto& entries = struct_expr->entries();
for (auto it = entries.rbegin(); it != entries.rend(); ++it) {
const auto& entry = *it;
if (entry.has_value()) {
stack->push(StackRecord(&entry.value(), source_info));
}
if (entry.has_map_key()) {
stack->push(StackRecord(&entry.map_key(), source_info));
}
}
}
void PushComprehensionDeps(const Comprehension* c, const Expr* expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack,
bool use_comprehension_callbacks) {
StackRecord iter_range(&c->iter_range(), source_info, c, expr, ITER_RANGE,
use_comprehension_callbacks);
StackRecord accu_init(&c->accu_init(), source_info, c, expr, ACCU_INIT,
use_comprehension_callbacks);
StackRecord loop_condition(&c->loop_condition(), source_info, c, expr,
LOOP_CONDITION, use_comprehension_callbacks);
StackRecord loop_step(&c->loop_step(), source_info, c, expr, LOOP_STEP,
use_comprehension_callbacks);
StackRecord result(&c->result(), source_info, c, expr, RESULT,
use_comprehension_callbacks);
stack->push(result);
stack->push(loop_step);
stack->push(loop_condition);
stack->push(accu_init);
stack->push(iter_range);
}
struct PushDepsVisitor {
void operator()(const ExprRecord& record) {
const Expr* expr = record.expr;
switch (expr->expr_kind_case()) {
case Expr::kSelectExpr:
PushSelectDeps(&expr->select_expr(), record.source_info, &stack);
break;
case Expr::kCallExpr:
PushCallDeps(&expr->call_expr(), expr, record.source_info, &stack);
break;
case Expr::kListExpr:
PushListDeps(&expr->list_expr(), record.source_info, &stack);
break;
case Expr::kStructExpr:
PushStructDeps(&expr->struct_expr(), record.source_info, &stack);
break;
case Expr::kComprehensionExpr:
PushComprehensionDeps(&expr->comprehension_expr(), expr,
record.source_info, &stack,
options.use_comprehension_callbacks);
break;
default:
break;
}
}
void operator()(const ArgRecord& record) {
stack.push(StackRecord(record.expr, record.source_info));
}
void operator()(const ComprehensionRecord& record) {
stack.push(StackRecord(record.expr, record.source_info));
}
std::stack<StackRecord>& stack;
const TraversalOptions& options;
};
void PushDependencies(const StackRecord& record, std::stack<StackRecord>& stack,
const TraversalOptions& options) {
absl::visit(PushDepsVisitor{stack, options}, record.record_variant);
}
}
void AstTraverse(const Expr* expr, const SourceInfo* source_info,
AstVisitor* visitor, TraversalOptions options) {
std::stack<StackRecord> stack;
stack.push(StackRecord(expr, source_info));
while (!stack.empty()) {
StackRecord& record = stack.top();
if (!record.visited) {
PreVisit(record, visitor);
PushDependencies(record, stack, options);
record.visited = true;
} else {
PostVisit(record, visitor);
stack.pop();
}
}
}
} | #include "eval/public/ast_traverse.h"
#include "eval/public/ast_visitor.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using google::api::expr::v1alpha1::Constant;
using google::api::expr::v1alpha1::Expr;
using google::api::expr::v1alpha1::SourceInfo;
using testing::_;
using Ident = google::api::expr::v1alpha1::Expr::Ident;
using Select = google::api::expr::v1alpha1::Expr::Select;
using Call = google::api::expr::v1alpha1::Expr::Call;
using CreateList = google::api::expr::v1alpha1::Expr::CreateList;
using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct;
using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension;
class MockAstVisitor : public AstVisitor {
public:
MOCK_METHOD(void, PreVisitExpr,
(const Expr* expr, const SourcePosition* position), (override));
MOCK_METHOD(void, PostVisitExpr,
(const Expr* expr, const SourcePosition* position), (override));
MOCK_METHOD(void, PreVisitConst,
(const Constant* const_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitConst,
(const Constant* const_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitIdent,
(const Ident* ident_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitIdent,
(const Ident* ident_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitSelect,
(const Select* select_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitSelect,
(const Select* select_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitCall,
(const Call* call_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitCall,
(const Call* call_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitComprehension,
(const Comprehension* comprehension_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitComprehension,
(const Comprehension* comprehension_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitComprehensionSubexpression,
(const Expr* expr, const Comprehension* comprehension_expr,
ComprehensionArg comprehension_arg,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitComprehensionSubexpression,
(const Expr* expr, const Comprehension* comprehension_expr,
ComprehensionArg comprehension_arg,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitTarget,
(const Expr* expr, const SourcePosition* position), (override));
MOCK_METHOD(void, PostVisitArg,
(int arg_num, const Expr* expr, const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitCreateList,
(const CreateList* list_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitCreateList,
(const CreateList* list_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PreVisitCreateStruct,
(const CreateStruct* struct_expr, const Expr* expr,
const SourcePosition* position),
(override));
MOCK_METHOD(void, PostVisitCreateStruct,
(const CreateStruct* struct_expr, const Expr* expr,
const SourcePosition* position),
(override));
};
TEST(AstCrawlerTest, CheckCrawlConstant) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto const_expr = expr.mutable_const_expr();
EXPECT_CALL(handler, PreVisitConst(const_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(const_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlIdent) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto ident_expr = expr.mutable_ident_expr();
EXPECT_CALL(handler, PreVisitIdent(ident_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(ident_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlSelectNotCrashingPostVisitAbsentOperand) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto select_expr = expr.mutable_select_expr();
EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlSelect) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto select_expr = expr.mutable_select_expr();
auto operand = select_expr->mutable_operand();
auto ident_expr = operand->mutable_ident_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PostVisitIdent(ident_expr, operand, _)).Times(1);
EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlCallNoReceiver) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto* call_expr = expr.mutable_call_expr();
Expr* arg0 = call_expr->add_args();
auto* const_expr = arg0->mutable_const_expr();
Expr* arg1 = call_expr->add_args();
auto* ident_expr = arg1->mutable_ident_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitTarget(_, _)).Times(0);
EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlCallReceiver) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto* call_expr = expr.mutable_call_expr();
Expr* target = call_expr->mutable_target();
auto* target_ident = target->mutable_ident_expr();
Expr* arg0 = call_expr->add_args();
auto* const_expr = arg0->mutable_const_expr();
Expr* arg1 = call_expr->add_args();
auto* ident_expr = arg1->mutable_ident_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(target_ident, target, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(target, _)).Times(1);
EXPECT_CALL(handler, PostVisitTarget(&expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCrawlComprehension) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto c = expr.mutable_comprehension_expr();
auto iter_range = c->mutable_iter_range();
auto iter_range_expr = iter_range->mutable_const_expr();
auto accu_init = c->mutable_accu_init();
auto accu_init_expr = accu_init->mutable_ident_expr();
auto loop_condition = c->mutable_loop_condition();
auto loop_condition_expr = loop_condition->mutable_const_expr();
auto loop_step = c->mutable_loop_step();
auto loop_step_expr = loop_step->mutable_ident_expr();
auto result = c->mutable_result();
auto result_expr = result->mutable_const_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1);
EXPECT_CALL(handler,
PreVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _))
.Times(1);
EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1);
EXPECT_CALL(handler,
PostVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _))
.Times(1);
EXPECT_CALL(handler,
PreVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _))
.Times(1);
EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1);
EXPECT_CALL(handler,
PostVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _))
.Times(1);
EXPECT_CALL(handler, PreVisitComprehensionSubexpression(loop_condition, c,
LOOP_CONDITION, _))
.Times(1);
EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _))
.Times(1);
EXPECT_CALL(handler, PostVisitComprehensionSubexpression(loop_condition, c,
LOOP_CONDITION, _))
.Times(1);
EXPECT_CALL(handler,
PreVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _))
.Times(1);
EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1);
EXPECT_CALL(handler,
PostVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _))
.Times(1);
EXPECT_CALL(handler, PreVisitComprehensionSubexpression(result, c, RESULT, _))
.Times(1);
EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1);
EXPECT_CALL(handler,
PostVisitComprehensionSubexpression(result, c, RESULT, _))
.Times(1);
EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1);
TraversalOptions opts;
opts.use_comprehension_callbacks = true;
AstTraverse(&expr, &source_info, &handler, opts);
}
TEST(AstCrawlerTest, CheckCrawlComprehensionLegacyCallbacks) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto c = expr.mutable_comprehension_expr();
auto iter_range = c->mutable_iter_range();
auto iter_range_expr = iter_range->mutable_const_expr();
auto accu_init = c->mutable_accu_init();
auto accu_init_expr = accu_init->mutable_ident_expr();
auto loop_condition = c->mutable_loop_condition();
auto loop_condition_expr = loop_condition->mutable_const_expr();
auto loop_step = c->mutable_loop_step();
auto loop_step_expr = loop_step->mutable_ident_expr();
auto result = c->mutable_result();
auto result_expr = result->mutable_const_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(ITER_RANGE, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(ACCU_INIT, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _))
.Times(1);
EXPECT_CALL(handler, PostVisitArg(LOOP_CONDITION, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(LOOP_STEP, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1);
EXPECT_CALL(handler, PostVisitArg(RESULT, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCreateList) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto list_expr = expr.mutable_list_expr();
auto arg0 = list_expr->add_elements();
auto const_expr = arg0->mutable_const_expr();
auto arg1 = list_expr->add_elements();
auto ident_expr = arg1->mutable_ident_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitCreateList(list_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
EXPECT_CALL(handler, PostVisitCreateList(list_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckCreateStruct) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto struct_expr = expr.mutable_struct_expr();
auto entry0 = struct_expr->add_entries();
auto key = entry0->mutable_map_key()->mutable_const_expr();
auto value = entry0->mutable_value()->mutable_ident_expr();
testing::InSequence seq;
EXPECT_CALL(handler, PreVisitCreateStruct(struct_expr, &expr, _)).Times(1);
EXPECT_CALL(handler, PostVisitConst(key, &entry0->map_key(), _)).Times(1);
EXPECT_CALL(handler, PostVisitIdent(value, &entry0->value(), _)).Times(1);
EXPECT_CALL(handler, PostVisitCreateStruct(struct_expr, &expr, _)).Times(1);
AstTraverse(&expr, &source_info, &handler);
}
TEST(AstCrawlerTest, CheckExprHandlers) {
SourceInfo source_info;
MockAstVisitor handler;
Expr expr;
auto struct_expr = expr.mutable_struct_expr();
auto entry0 = struct_expr->add_entries();
entry0->mutable_map_key()->mutable_const_expr();
entry0->mutable_value()->mutable_ident_expr();
EXPECT_CALL(handler, PreVisitExpr(_, _)).Times(3);
EXPECT_CALL(handler, PostVisitExpr(_, _)).Times(3);
AstTraverse(&expr, &source_info, &handler);
}
}
} | 8 |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_SOURCE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_SOURCE_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
namespace cel {
namespace common_internal {
class SourceImpl;
}
class Source;
using SourcePosition = int32_t;
struct SourceRange final {
SourcePosition begin = -1;
SourcePosition end = -1;
};
inline bool operator==(const SourceRange& lhs, const SourceRange& rhs) {
return lhs.begin == rhs.begin && lhs.end == rhs.end;
}
inline bool operator!=(const SourceRange& lhs, const SourceRange& rhs) {
return !operator==(lhs, rhs);
}
struct SourceLocation final {
int32_t line = -1;
int32_t column = -1;
};
inline bool operator==(const SourceLocation& lhs, const SourceLocation& rhs) {
return lhs.line == rhs.line && lhs.column == rhs.column;
}
inline bool operator!=(const SourceLocation& lhs, const SourceLocation& rhs) {
return !operator==(lhs, rhs);
}
class SourceContentView final {
public:
SourceContentView(const SourceContentView&) = default;
SourceContentView(SourceContentView&&) = default;
SourceContentView& operator=(const SourceContentView&) = default;
SourceContentView& operator=(SourceContentView&&) = default;
SourcePosition size() const;
bool empty() const;
char32_t at(SourcePosition position) const;
std::string ToString(SourcePosition begin, SourcePosition end) const;
std::string ToString(SourcePosition begin) const {
return ToString(begin, size());
}
std::string ToString() const { return ToString(0); }
void AppendToString(std::string& dest) const;
private:
friend class Source;
constexpr SourceContentView() = default;
constexpr explicit SourceContentView(absl::Span<const char> view)
: view_(view) {}
constexpr explicit SourceContentView(absl::Span<const uint8_t> view)
: view_(view) {}
constexpr explicit SourceContentView(absl::Span<const char16_t> view)
: view_(view) {}
constexpr explicit SourceContentView(absl::Span<const char32_t> view)
: view_(view) {}
absl::variant<absl::Span<const char>, absl::Span<const uint8_t>,
absl::Span<const char16_t>, absl::Span<const char32_t>>
view_;
};
class Source {
public:
using ContentView = SourceContentView;
Source(const Source&) = delete;
Source(Source&&) = delete;
virtual ~Source() = default;
Source& operator=(const Source&) = delete;
Source& operator=(Source&&) = delete;
virtual absl::string_view description() const
ABSL_ATTRIBUTE_LIFETIME_BOUND = 0;
absl::optional<SourceLocation> GetLocation(SourcePosition position) const;
absl::optional<SourcePosition> GetPosition(
const SourceLocation& location) const;
absl::optional<std::string> Snippet(int32_t line) const;
std::string DisplayErrorLocation(SourceLocation location) const;
virtual ContentView content() const ABSL_ATTRIBUTE_LIFETIME_BOUND = 0;
virtual absl::Span<const SourcePosition> line_offsets() const
ABSL_ATTRIBUTE_LIFETIME_BOUND = 0;
protected:
static constexpr ContentView EmptyContentView() { return ContentView(); }
static constexpr ContentView MakeContentView(absl::Span<const char> view) {
return ContentView(view);
}
static constexpr ContentView MakeContentView(absl::Span<const uint8_t> view) {
return ContentView(view);
}
static constexpr ContentView MakeContentView(
absl::Span<const char16_t> view) {
return ContentView(view);
}
static constexpr ContentView MakeContentView(
absl::Span<const char32_t> view) {
return ContentView(view);
}
private:
friend class common_internal::SourceImpl;
Source() = default;
absl::optional<SourcePosition> FindLinePosition(int32_t line) const;
absl::optional<std::pair<int32_t, SourcePosition>> FindLine(
SourcePosition position) const;
};
using SourcePtr = std::unique_ptr<Source>;
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(
absl::string_view content, std::string description = "<input>");
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(
const absl::Cord& content, std::string description = "<input>");
}
#endif
#include "common/source.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "internal/unicode.h"
#include "internal/utf8.h"
namespace cel {
SourcePosition SourceContentView::size() const {
return static_cast<SourcePosition>(absl::visit(
absl::Overload(
[](absl::Span<const char> view) { return view.size(); },
[](absl::Span<const uint8_t> view) { return view.size(); },
[](absl::Span<const char16_t> view) { return view.size(); },
[](absl::Span<const char32_t> view) { return view.size(); }),
view_));
}
bool SourceContentView::empty() const {
return absl::visit(
absl::Overload(
[](absl::Span<const char> view) { return view.empty(); },
[](absl::Span<const uint8_t> view) { return view.empty(); },
[](absl::Span<const char16_t> view) { return view.empty(); },
[](absl::Span<const char32_t> view) { return view.empty(); }),
view_);
}
char32_t SourceContentView::at(SourcePosition position) const {
ABSL_DCHECK_GE(position, 0);
ABSL_DCHECK_LT(position, size());
return absl::visit(
absl::Overload(
[position =
static_cast<size_t>(position)](absl::Span<const char> view) {
return static_cast<char32_t>(static_cast<uint8_t>(view[position]));
},
[position =
static_cast<size_t>(position)](absl::Span<const uint8_t> view) {
return static_cast<char32_t>(view[position]);
},
[position =
static_cast<size_t>(position)](absl::Span<const char16_t> view) {
return static_cast<char32_t>(view[position]);
},
[position =
static_cast<size_t>(position)](absl::Span<const char32_t> view) {
return static_cast<char32_t>(view[position]);
}),
view_);
}
std::string SourceContentView::ToString(SourcePosition begin,
SourcePosition end) const {
ABSL_DCHECK_GE(begin, 0);
ABSL_DCHECK_LE(end, size());
ABSL_DCHECK_LE(begin, end);
return absl::visit(
absl::Overload(
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char> view) {
view = view.subspan(begin, end - begin);
return std::string(view.data(), view.size());
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const uint8_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 2);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char16_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 3);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char32_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 4);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
}),
view_);
}
void SourceContentView::AppendToString(std::string& dest) const {
absl::visit(absl::Overload(
[&dest](absl::Span<const char> view) {
dest.append(view.data(), view.size());
},
[&dest](absl::Span<const uint8_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
},
[&dest](absl::Span<const char16_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
},
[&dest](absl::Span<const char32_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
}),
view_);
}
namespace common_internal {
class SourceImpl : public Source {
public:
SourceImpl(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets)
: description_(std::move(description)),
line_offsets_(std::move(line_offsets)) {}
absl::string_view description() const final { return description_; }
absl::Span<const SourcePosition> line_offsets() const final {
return absl::MakeConstSpan(line_offsets_);
}
private:
const std::string description_;
const absl::InlinedVector<SourcePosition, 1> line_offsets_;
};
namespace {
class AsciiSource final : public SourceImpl {
public:
AsciiSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char> text_;
};
class Latin1Source final : public SourceImpl {
public:
Latin1Source(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<uint8_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<uint8_t> text_;
};
class BasicPlaneSource final : public SourceImpl {
public:
BasicPlaneSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char16_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char16_t> text_;
};
class SupplementalPlaneSource final : public SourceImpl {
public:
SupplementalPlaneSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char32_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char32_t> text_;
};
template <typename T>
struct SourceTextTraits;
template <>
struct SourceTextTraits<absl::string_view> {
using iterator_type = absl::string_view;
static iterator_type Begin(absl::string_view text) { return text; }
static void Advance(iterator_type& it, size_t n) { it.remove_prefix(n); }
static void AppendTo(std::vector<uint8_t>& out, absl::string_view text,
size_t n) {
const auto* in = reinterpret_cast<const uint8_t*>(text.data());
out.insert(out.end(), in, in + n);
}
static std::vector<char> ToVector(absl::string_view in) {
std::vector<char> out;
out.reserve(in.size());
out.insert(out.end(), in.begin(), in.end());
return out;
}
};
template <>
struct SourceTextTraits<absl::Cord> {
using iterator_type = absl::Cord::CharIterator;
static iterator_type Begin(const absl::Cord& text) {
return text.char_begin();
}
static void Advance(iterator_type& it, size_t n) {
absl::Cord::Advance(&it, n);
}
static void AppendTo(std::vector<uint8_t>& out, const absl::Cord& text,
size_t n) {
auto it = text.char_begin();
while (n > 0) {
auto str = absl::Cord::ChunkRemaining(it);
size_t to_append = std::min(n, str.size());
const auto* in = reinterpret_cast<const uint8_t*>(str.data());
out.insert(out.end(), in, in + to_append);
n -= to_append;
absl::Cord::Advance(&it, to_append);
}
}
static std::vector<char> ToVector(const absl::Cord& in) {
std::vector<char> out;
out.reserve(in.size());
for (const auto& chunk : in.Chunks()) {
out.insert(out.end(), chunk.begin(), chunk.end());
}
return out;
}
};
template <typename T>
absl::StatusOr<SourcePtr> NewSourceImpl(std::string description, const T& text,
const size_t text_size) {
if (ABSL_PREDICT_FALSE(
text_size >
static_cast<size_t>(std::numeric_limits<int32_t>::max()))) {
return absl::InvalidArgumentError("expression larger than 2GiB limit");
}
using Traits = SourceTextTraits<T>;
size_t index = 0;
typename Traits::iterator_type it = Traits::Begin(text);
SourcePosition offset = 0;
char32_t code_point;
size_t code_units;
std::vector<uint8_t> data8;
std::vector<char16_t> data16;
std::vector<char32_t> data32;
absl::InlinedVector<SourcePosition, 1> line_offsets;
while (index < text_size) {
std::tie(code_point, code_units) = cel::internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
cel::internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0x7f) {
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
if (code_point <= 0xff) {
data8.reserve(text_size);
Traits::AppendTo(data8, text, index);
data8.push_back(static_cast<uint8_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto latin1;
}
if (code_point <= 0xffff) {
data16.reserve(text_size);
for (size_t offset = 0; offset < index; offset++) {
data16.push_back(static_cast<uint8_t>(text[offset]));
}
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto basic;
}
data32.reserve(text_size);
for (size_t offset = 0; offset < index; offset++) {
data32.push_back(static_cast<char32_t>(text[offset]));
}
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<AsciiSource>(
std::move(description), std::move(line_offsets), Traits::ToVector(text));
latin1:
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0xff) {
data8.push_back(static_cast<uint8_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
if (code_point <= 0xffff) {
data16.reserve(text_size);
for (const auto& value : data8) {
data16.push_back(value);
}
std::vector<uint8_t>().swap(data8);
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto basic;
}
data32.reserve(text_size);
for (const auto& value : data8) {
data32.push_back(value);
}
std::vector<uint8_t>().swap(data8);
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<Latin1Source>(
std::move(description), std::move(line_offsets), std::move(data8));
basic:
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0xffff) {
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
data32.reserve(text_size);
for (const auto& value : data16) {
data32.push_back(static_cast<char32_t>(value));
}
std::vector<char16_t>().swap(data16);
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<BasicPlaneSource>(
std::move(description), std::move(line_offsets), std::move(data16));
supplemental:
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
}
line_offsets.push_back(offset + 1);
return std::make_unique<SupplementalPlaneSource>(
std::move(description), std::move(line_offsets), std::move(data32));
}
}
}
absl::optional<SourceLocation> Source::GetLocation(
SourcePosition position) const {
if (auto line_and_offset = FindLine(position);
ABSL_PREDICT_TRUE(line_and_offset.has_value())) {
return SourceLocation{line_and_offset->first,
position - line_and_offset->second};
}
return absl::nullopt;
}
absl::optional<SourcePosition> Source::GetPosition(
const SourceLocation& location) const {
if (ABSL_PREDICT_FALSE(location.line < 1 || location.column < 0)) {
return absl::nullopt;
}
if (auto position = FindLinePosition(location.line);
ABSL_PREDICT_TRUE(position.has_value())) {
return *position + location.column;
}
return absl::nullopt;
}
absl::optional<std::string> Source::Snippet(int32_t line) const {
auto content = this->content();
auto start = FindLinePosition(line);
if (ABSL_PREDICT_FALSE(!start.has_value() || content.empty())) {
return absl::nullopt;
}
auto end = FindLinePosition(line + 1);
if (end.has_value()) {
return content.ToString(*start, *end - 1);
}
return content.ToString(*start);
}
std::string Source::DisplayErrorLocation(SourceLocation location) const {
constexpr char32_t kDot = '.';
constexpr char32_t kHat = '^';
constexpr char32_t kWideDot = 0xff0e;
constexpr char32_t kWideHat = 0xff3e;
absl::optional<std::string> snippet = Snippet(location.line);
if (!snippet || snippet->empty()) {
return "";
}
*snippet = absl::StrReplaceAll(*snippet, {{"\t", " "}});
absl::string_view snippet_view(*snippet);
std::string result;
absl::StrAppend(&result, "\n | ", *snippet);
absl::StrAppend(&result, "\n | ");
std::string index_line;
for (int32_t i = 0; i < location.column && !snippet_view.empty(); ++i) {
size_t count;
std::tie(std::ignore, count) = internal::Utf8Decode(snippet_view);
snippet_view.remove_prefix(count);
if (count > 1) {
internal::Utf8Encode(index_line, kWideDot);
} else {
internal::Utf8Encode(index_line, kDot);
}
}
size_t count = 0;
if (!snippet_view.empty()) {
std::tie(std::ignore, count) = internal::Utf8Decode(snippet_view);
}
if (count > 1) {
internal::Utf8Encode(index_line, kWideHat);
} else {
internal::Utf8Encode(index_line, kHat);
}
absl::StrAppend(&result, index_line);
return result;
}
absl::optional<SourcePosition> Source::FindLinePosition(int32_t line) const {
if (ABSL_PREDICT_FALSE(line < 1)) {
return absl::nullopt;
}
if (line == 1) {
return SourcePosition{0};
}
const auto line_offsets = this->line_offsets();
if (ABSL_PREDICT_TRUE(line <= static_cast<int32_t>(line_offsets.size()))) {
return line_offsets[static_cast<size_t>(line - 2)];
}
return absl::nullopt;
}
absl::optional<std::pair<int32_t, SourcePosition>> Source::FindLine(
SourcePosition position) const {
if (ABSL_PREDICT_FALSE(position < 0)) {
return absl::nullopt;
}
int32_t line = 1;
const auto line_offsets = this->line_offsets();
for (const auto& line_offset : line_offsets) {
if (line_offset > position) {
break;
}
++line;
}
if (line == 1) {
return std::make_pair(line, SourcePosition{0});
}
return std::make_pair(line, line_offsets[static_cast<size_t>(line) - 2]);
}
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(absl::string_view content,
std::string description) {
return common_internal::NewSourceImpl(std::move(description), content,
content.size());
}
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(const absl::Cord& content,
std::string description) {
return common_internal::NewSourceImpl(std::move(description), content,
content.size());
}
} | #include "common/source.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::ElementsAre;
using testing::Eq;
using testing::Ne;
using testing::Optional;
TEST(SourceRange, Default) {
SourceRange range;
EXPECT_EQ(range.begin, -1);
EXPECT_EQ(range.end, -1);
}
TEST(SourceRange, Equality) {
EXPECT_THAT((SourceRange{}), (Eq(SourceRange{})));
EXPECT_THAT((SourceRange{0, 1}), (Ne(SourceRange{0, 0})));
}
TEST(SourceLocation, Default) {
SourceLocation location;
EXPECT_EQ(location.line, -1);
EXPECT_EQ(location.column, -1);
}
TEST(SourceLocation, Equality) {
EXPECT_THAT((SourceLocation{}), (Eq(SourceLocation{})));
EXPECT_THAT((SourceLocation{1, 1}), (Ne(SourceLocation{1, 0})));
}
TEST(StringSource, Description) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->description(), Eq("offset-test"));
}
TEST(StringSource, Content) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->content().ToString(),
Eq("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"));
}
TEST(StringSource, PositionAndLocation) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->line_offsets(), ElementsAre(7, 24, 35));
auto start = source->GetPosition(SourceLocation{int32_t{1}, int32_t{2}});
auto end = source->GetPosition(SourceLocation{int32_t{3}, int32_t{2}});
ASSERT_TRUE(start.has_value());
ASSERT_TRUE(end.has_value());
EXPECT_THAT(source->GetLocation(*start),
Optional(Eq(SourceLocation{int32_t{1}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(*end),
Optional(Eq(SourceLocation{int32_t{3}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(-1), Eq(absl::nullopt));
EXPECT_THAT(source->content().ToString(*start, *end),
Eq("d &&\n\t b.c.arg(10) &&\n\t "));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{0}, int32_t{0}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{1}, int32_t{-1}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{4}, int32_t{0}}),
Eq(absl::nullopt));
}
TEST(StringSource, SnippetSingle) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("hello, world", "one-line-test"));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello, world")));
EXPECT_THAT(source->Snippet(2), Eq(absl::nullopt));
}
TEST(StringSource, SnippetMulti) {
ASSERT_OK_AND_ASSIGN(auto source,
NewSource("hello\nworld\nmy\nbub\n", "four-line-test"));
EXPECT_THAT(source->Snippet(0), Eq(absl::nullopt));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello")));
EXPECT_THAT(source->Snippet(2), Optional(Eq("world")));
EXPECT_THAT(source->Snippet(3), Optional(Eq("my")));
EXPECT_THAT(source->Snippet(4), Optional(Eq("bub")));
EXPECT_THAT(source->Snippet(5), Optional(Eq("")));
EXPECT_THAT(source->Snippet(6), Eq(absl::nullopt));
}
TEST(CordSource, Description) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->description(), Eq("offset-test"));
}
TEST(CordSource, Content) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->content().ToString(),
Eq("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"));
}
TEST(CordSource, PositionAndLocation) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->line_offsets(), ElementsAre(7, 24, 35));
auto start = source->GetPosition(SourceLocation{int32_t{1}, int32_t{2}});
auto end = source->GetPosition(SourceLocation{int32_t{3}, int32_t{2}});
ASSERT_TRUE(start.has_value());
ASSERT_TRUE(end.has_value());
EXPECT_THAT(source->GetLocation(*start),
Optional(Eq(SourceLocation{int32_t{1}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(*end),
Optional(Eq(SourceLocation{int32_t{3}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(-1), Eq(absl::nullopt));
EXPECT_THAT(source->content().ToString(*start, *end),
Eq("d &&\n\t b.c.arg(10) &&\n\t "));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{0}, int32_t{0}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{1}, int32_t{-1}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{4}, int32_t{0}}),
Eq(absl::nullopt));
}
TEST(CordSource, SnippetSingle) {
ASSERT_OK_AND_ASSIGN(auto source,
NewSource(absl::Cord("hello, world"), "one-line-test"));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello, world")));
EXPECT_THAT(source->Snippet(2), Eq(absl::nullopt));
}
TEST(CordSource, SnippetMulti) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("hello\nworld\nmy\nbub\n"), "four-line-test"));
EXPECT_THAT(source->Snippet(0), Eq(absl::nullopt));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello")));
EXPECT_THAT(source->Snippet(2), Optional(Eq("world")));
EXPECT_THAT(source->Snippet(3), Optional(Eq("my")));
EXPECT_THAT(source->Snippet(4), Optional(Eq("bub")));
EXPECT_THAT(source->Snippet(5), Optional(Eq("")));
EXPECT_THAT(source->Snippet(6), Eq(absl::nullopt));
}
TEST(Source, DisplayErrorLocationBasic) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello' +\n 'world'"));
SourceLocation location{2, 3};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'world'"
"\n | ...^");
}
TEST(Source, DisplayErrorLocationOutOfRange) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello world!'"));
SourceLocation location{3, 3};
EXPECT_EQ(source->DisplayErrorLocation(location), "");
}
TEST(Source, DisplayErrorLocationTabsShortened) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello' +\n\t\t'world!'"));
SourceLocation location{2, 4};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'world!'"
"\n | ....^");
}
TEST(Source, DisplayErrorLocationFullWidth) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello'"));
SourceLocation location{1, 2};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'Hello'"
"\n | ..^");
}
}
} | 9 |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_MEMORY_H_
#define THIRD_PARTY_CEL_CPP_COMMON_MEMORY_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/numeric/bits.h"
#include "common/allocator.h"
#include "common/arena.h"
#include "common/data.h"
#include "common/internal/metadata.h"
#include "common/internal/reference_count.h"
#include "common/native_type.h"
#include "common/reference_count.h"
#include "internal/exceptions.h"
#include "internal/to_address.h"
#include "google/protobuf/arena.h"
namespace cel {
using internal::to_address;
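// MemoryManagement enumerates the supported memory management models: pooling,
// where allocations come from a region (such as a google::protobuf::Arena) and are
// released in bulk, and traditional reference counting.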
enum class MemoryManagement {
kPooling = 1,
kReferenceCounting,
};
std::ostream& operator<<(std::ostream& out, MemoryManagement memory_management);
class Data;
class ABSL_ATTRIBUTE_TRIVIAL_ABI [[nodiscard]] Owner;
class Borrower;
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI Shared;
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI SharedView;
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI [[nodiscard]] Unique;
template <typename T>
struct EnableSharedFromThis;
class MemoryManager;
class ReferenceCountingMemoryManager;
class PoolingMemoryManager;
namespace common_internal {
absl::Nullable<const ReferenceCount*> OwnerRelease(Owner& owner) noexcept;
template <typename T>
T* GetPointer(const Shared<T>& shared);
template <typename T>
const ReferenceCount* GetReferenceCount(const Shared<T>& shared);
template <typename T>
Shared<T> MakeShared(AdoptRef, T* value, const ReferenceCount* refcount);
template <typename T>
Shared<T> MakeShared(T* value, const ReferenceCount* refcount);
template <typename T>
T* GetPointer(SharedView<T> shared);
template <typename T>
const ReferenceCount* GetReferenceCount(SharedView<T> shared);
template <typename T>
SharedView<T> MakeSharedView(T* value, const ReferenceCount* refcount);
}
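// StaticCast converts a Shared/SharedView to a related element type, analogous
// to static_cast on the underlying pointer, without disturbing ownership.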
template <typename To, typename From>
Shared<To> StaticCast(const Shared<From>& from);
template <typename To, typename From>
Shared<To> StaticCast(Shared<From>&& from);
template <typename To, typename From>
SharedView<To> StaticCast(SharedView<From> from);
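// Owner is a type-erased strong owner of the memory underlying some object. It
// holds nothing, a google::protobuf::Arena, or a ReferenceCount, encoded as a tagged
// pointer whose low bits (kArenaBit, kReferenceCountBit) discriminate between
// the arena and reference-count cases.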
class ABSL_ATTRIBUTE_TRIVIAL_ABI [[nodiscard]] Owner final {
private:
static constexpr uintptr_t kNone = common_internal::kMetadataOwnerNone;
static constexpr uintptr_t kReferenceCountBit =
common_internal::kMetadataOwnerReferenceCountBit;
static constexpr uintptr_t kArenaBit =
common_internal::kMetadataOwnerArenaBit;
static constexpr uintptr_t kBits = common_internal::kMetadataOwnerBits;
static constexpr uintptr_t kPointerMask =
common_internal::kMetadataOwnerPointerMask;
public:
static Owner None() noexcept { return Owner(); }
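  // Returns an Owner for the allocator's arena, or None() when the allocator
  // is not arena-backed.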
static Owner Allocator(Allocator<> allocator) noexcept {
auto* arena = allocator.arena();
return arena != nullptr ? Arena(arena) : None();
}
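  // Returns an Owner referring to the given arena. The arena itself is not
  // reference counted and must outlive the returned Owner.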
static Owner Arena(absl::Nonnull<google::protobuf::Arena*> arena
ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
ABSL_DCHECK(arena != nullptr);
return Owner(reinterpret_cast<uintptr_t>(arena) | kArenaBit);
}
static Owner Arena(std::nullptr_t) = delete;
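  // Returns an Owner holding a strong reference to reference_count; the
  // reference is released when the Owner is destroyed or reassigned.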
static Owner ReferenceCount(
absl::Nonnull<const ReferenceCount*> reference_count
ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
ABSL_DCHECK(reference_count != nullptr);
common_internal::StrongRef(*reference_count);
return Owner(reinterpret_cast<uintptr_t>(reference_count) |
kReferenceCountBit);
}
static Owner ReferenceCount(std::nullptr_t) = delete;
Owner() = default;
Owner(const Owner& other) noexcept : Owner(CopyFrom(other.ptr_)) {}
Owner(Owner&& other) noexcept : Owner(MoveFrom(other.ptr_)) {}
explicit Owner(Borrower borrower) noexcept;
~Owner() { Destroy(ptr_); }
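  // Assignment releases any currently held reference before adopting the
  // other's; copy assignment is a no-op when both already refer to the same
  // target.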
Owner& operator=(const Owner& other) noexcept {
if (ptr_ != other.ptr_) {
Destroy(ptr_);
ptr_ = CopyFrom(other.ptr_);
}
return *this;
}
Owner& operator=(Owner&& other) noexcept {
if (ABSL_PREDICT_TRUE(this != &other)) {
Destroy(ptr_);
ptr_ = MoveFrom(other.ptr_);
}
return *this;
}
explicit operator bool() const noexcept { return !IsNone(ptr_); }
absl::Nullable<google::protobuf::Arena*> arena() const noexcept {
return (ptr_ & Owner::kBits) == Owner::kArenaBit
? reinterpret_cast<google::protobuf::Arena*>(ptr_ & Owner::kPointerMask)
: nullptr;
}
friend bool operator==(const Owner& lhs, const Owner& rhs) noexcept {
return lhs.ptr_ == rhs.ptr_;
}
private:
friend class Borrower;
friend absl::Nullable<const common_internal::ReferenceCount*>
common_internal::OwnerRelease(Owner& owner) noexcept;
constexpr explicit Owner(uintptr_t ptr) noexcept : ptr_(ptr) {}
static constexpr bool IsNone(uintptr_t ptr) noexcept { return ptr == kNone; }
static constexpr bool IsArena(uintptr_t ptr) noexcept {
return (ptr & kArenaBit) != kNone;
}
static constexpr bool IsReferenceCount(uintptr_t ptr) noexcept {
return (ptr & kReferenceCountBit) != kNone;
}
ABSL_ATTRIBUTE_RETURNS_NONNULL
static absl::Nonnull<google::protobuf::Arena*> AsArena(uintptr_t ptr) noexcept {
ABSL_ASSERT(IsArena(ptr));
return reinterpret_cast<google::protobuf::Arena*>(ptr & kPointerMask);
}
ABSL_ATTRIBUTE_RETURNS_NONNULL
static absl::Nonnull<const common_internal::ReferenceCount*> AsReferenceCount(
uintptr_t ptr) noexcept {
ABSL_ASSERT(IsReferenceCount(ptr));
return reinterpret_cast<const common_internal::ReferenceCount*>(
ptr & kPointerMask);
}
static uintptr_t CopyFrom(uintptr_t other) noexcept { return Own(other); }
static uintptr_t MoveFrom(uintptr_t& other) noexcept {
return std::exchange(other, kNone);
}
static void Destroy(uintptr_t ptr) noexcept { Unown(ptr); }
static uintptr_t Own(uintptr_t ptr) noexcept {
if (IsReferenceCount(ptr)) {
const auto* refcount = Owner::AsReferenceCount(ptr);
ABSL_ASSUME(refcount != nullptr);
common_internal::StrongRef(refcount);
}
return ptr;
}
static void Unown(uintptr_t ptr) noexcept {
if (IsReferenceCount(ptr)) {
const auto* reference_count = AsReferenceCount(ptr);
ABSL_ASSUME(reference_count != nullptr);
common_internal::StrongUnref(reference_count);
}
}
uintptr_t ptr_ = kNone;
};
inline bool operator!=(const Owner& lhs, const Owner& rhs) noexcept {
return !operator==(lhs, rhs);
}
namespace common_internal {
inline absl::Nullable<const ReferenceCount*> OwnerRelease(
Owner& owner) noexcept {
uintptr_t ptr = std::exchange(owner.ptr_, uintptr_t{0});
if (Owner::IsReferenceCount(ptr)) {
return Owner::AsReferenceCount(ptr);
}
return nullptr;
}
}
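// Borrower is the non-owning counterpart of Owner: it copies the same tagged
// pointer but never refs or unrefs, so it must not outlive the Owner, arena,
// or reference count it was created from.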
class Borrower final {
public:
static Borrower None() noexcept { return Borrower(); }
static Borrower Allocator(Allocator<> allocator) noexcept {
auto* arena = allocator.arena();
return arena != nullptr ? Arena(arena) : None();
}
static Borrower Arena(absl::Nonnull<google::protobuf::Arena*> arena
ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
ABSL_DCHECK(arena != nullptr);
return Borrower(reinterpret_cast<uintptr_t>(arena) | Owner::kArenaBit);
}
static Borrower Arena(std::nullptr_t) = delete;
static Borrower ReferenceCount(
absl::Nonnull<const ReferenceCount*> reference_count
ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
ABSL_DCHECK(reference_count != nullptr);
return Borrower(reinterpret_cast<uintptr_t>(reference_count) |
Owner::kReferenceCountBit);
}
static Borrower ReferenceCount(std::nullptr_t) = delete;
Borrower() = default;
Borrower(const Borrower&) = default;
Borrower(Borrower&&) = default;
Borrower& operator=(const Borrower&) = default;
Borrower& operator=(Borrower&&) = default;
Borrower(const Owner& owner ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
: ptr_(owner.ptr_) {}
Borrower& operator=(
const Owner& owner ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
ptr_ = owner.ptr_;
return *this;
}
Borrower& operator=(Owner&&) = delete;
explicit operator bool() const noexcept { return !Owner::IsNone(ptr_); }
absl::Nullable<google::protobuf::Arena*> arena() const noexcept {
return (ptr_ & Owner::kBits) == Owner::kArenaBit
? reinterpret_cast<google::protobuf::Arena*>(ptr_ & Owner::kPointerMask)
: nullptr;
}
friend bool operator==(Borrower lhs, Borrower rhs) noexcept {
return lhs.ptr_ == rhs.ptr_;
}
private:
friend class Owner;
constexpr explicit Borrower(uintptr_t ptr) noexcept : ptr_(ptr) {}
uintptr_t ptr_ = Owner::kNone;
};
inline bool operator!=(Borrower lhs, Borrower rhs) noexcept {
return !operator==(lhs, rhs);
}
inline bool operator==(Borrower lhs, const Owner& rhs) noexcept {
return operator==(lhs, Borrower(rhs));
}
inline bool operator==(const Owner& lhs, Borrower rhs) noexcept {
return operator==(Borrower(lhs), rhs);
}
inline bool operator!=(Borrower lhs, const Owner& rhs) noexcept {
return !operator==(lhs, rhs);
}
inline bool operator!=(const Owner& lhs, Borrower rhs) noexcept {
return !operator==(lhs, rhs);
}
inline Owner::Owner(Borrower borrower) noexcept
: ptr_(Owner::Own(borrower.ptr_)) {}
template <typename T, typename... Args>
Unique<T> AllocateUnique(Allocator<> allocator, Args&&... args);
template <typename T>
Unique<T> WrapUnique(T* object);
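// Unique is an arena-aware analogue of std::unique_ptr. With a null arena_
// it destroys the object outright; with a non-null arena_ the storage
// belongs to the arena and Delete() only runs the destructor, and only when
// it is not skippable for arena-owned objects.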
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI [[nodiscard]] Unique final {
public:
using element_type = T;
static_assert(!std::is_array_v<T>, "T must not be an array");
static_assert(!std::is_reference_v<T>, "T must not be a reference");
static_assert(!std::is_volatile_v<T>, "T must not be volatile qualified");
Unique() = default;
Unique(const Unique&) = delete;
Unique& operator=(const Unique&) = delete;
explicit Unique(T* ptr) noexcept : Unique(ptr, nullptr) {}
Unique(std::nullptr_t) noexcept : Unique() {}
Unique(Unique&& other) noexcept : Unique(other.ptr_, other.arena_) {
other.ptr_ = nullptr;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Unique(Unique<U>&& other) noexcept : Unique(other.ptr_, other.arena_) {
other.ptr_ = nullptr;
}
~Unique() { Delete(); }
Unique& operator=(Unique&& other) noexcept {
if (ABSL_PREDICT_TRUE(this != &other)) {
Delete();
ptr_ = other.ptr_;
arena_ = other.arena_;
other.ptr_ = nullptr;
}
return *this;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Unique& operator=(U* other) noexcept {
reset(other);
return *this;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Unique& operator=(Unique<U>&& other) noexcept {
Delete();
ptr_ = other.ptr_;
arena_ = other.arena_;
other.ptr_ = nullptr;
return *this;
}
Unique& operator=(std::nullptr_t) noexcept {
reset();
return *this;
}
T& operator*() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(static_cast<bool>(*this));
return *get();
}
absl::Nonnull<T*> operator->() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(static_cast<bool>(*this));
return get();
}
ABSL_MUST_USE_RESULT T* release() noexcept {
if constexpr (!IsArenaDestructorSkippable<T>::value) {
if (static_cast<bool>(*this) && arena_ != nullptr) {
arena_->OwnDestructor(ptr_);
}
}
return std::exchange(ptr_, nullptr);
}
void reset() noexcept { reset(nullptr); }
void reset(T* ptr) noexcept {
Delete();
ptr_ = ptr;
arena_ = nullptr;
}
void reset(std::nullptr_t) noexcept {
Delete();
ptr_ = nullptr;
}
explicit operator bool() const noexcept { return get() != nullptr; }
friend void swap(Unique& lhs, Unique& rhs) noexcept {
using std::swap;
swap(lhs.ptr_, rhs.ptr_);
swap(lhs.arena_, rhs.arena_);
}
private:
template <typename U>
friend class Unique;
template <typename U, typename... Args>
friend Unique<U> AllocateUnique(Allocator<> allocator, Args&&... args);
friend class ReferenceCountingMemoryManager;
friend class PoolingMemoryManager;
friend struct std::pointer_traits<Unique<T>>;
constexpr Unique(T* ptr, google::protobuf::Arena* arena) noexcept
: ptr_(ptr), arena_(arena) {}
T* get() const noexcept { return ptr_; }
void Delete() const noexcept {
if (static_cast<bool>(*this)) {
if (arena_ != nullptr) {
if constexpr (!IsArenaDestructorSkippable<T>::value) {
ptr_->~T();
}
} else {
google::protobuf::Arena::Destroy(ptr_);
}
}
}
T* ptr_ = nullptr;
google::protobuf::Arena* arena_ = nullptr;
};
template <typename T>
Unique(T*) -> Unique<T>;
template <typename T, typename... Args>
Unique<T> AllocateUnique(Allocator<> allocator, Args&&... args) {
T* object;
auto* arena = allocator.arena();
if constexpr (IsArenaConstructible<T>::value) {
object = google::protobuf::Arena::Create<T>(arena, std::forward<Args>(args)...);
arena = nullptr;
} else {
void* p = allocator.allocate_bytes(sizeof(T), alignof(T));
CEL_INTERNAL_TRY { object = ::new (p) T(std::forward<Args>(args)...); }
CEL_INTERNAL_CATCH_ANY {
allocator.deallocate_bytes(p, sizeof(T), alignof(T));
CEL_INTERNAL_RETHROW;
}
}
return Unique<T>(object, arena);
}
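// Usage sketch (allocators as exercised by the tests below):
//   google::protobuf::Arena arena;
//   auto on_arena = AllocateUnique<std::string>(ArenaAllocator(&arena), "a");
//   auto on_heap = AllocateUnique<std::string>(NewDeleteAllocator(), "b");
// release() hands an arena-allocated object's destructor back to the arena
// before relinquishing the pointer.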
template <typename T>
Unique<T> WrapUnique(T* object) {
return Unique<T>(object);
}
}
namespace std {
template <typename T>
struct pointer_traits<cel::Unique<T>> {
using pointer = cel::Unique<T>;
using element_type = typename cel::Unique<T>::element_type;
using difference_type = ptrdiff_t;
template <typename U>
using rebind = cel::Unique<U>;
static element_type* to_address(const pointer& p) noexcept { return p.ptr_; }
};
}
namespace cel {
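// Shared is a reference-counted shared pointer built on cel's intrusive
// ReferenceCount. A null refcount_ marks an object whose storage is managed
// elsewhere (e.g. an arena); StrongRef/StrongUnref tolerate null, so such
// handles copy and destroy without any bookkeeping.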
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI Shared final {
public:
Shared() = default;
Shared(const Shared& other)
: value_(other.value_), refcount_(other.refcount_) {
common_internal::StrongRef(refcount_);
}
Shared(Shared&& other) noexcept
: value_(other.value_), refcount_(other.refcount_) {
other.value_ = nullptr;
other.refcount_ = nullptr;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Shared(const Shared<U>& other)
: value_(other.value_), refcount_(other.refcount_) {
common_internal::StrongRef(refcount_);
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Shared(Shared<U>&& other) noexcept
: value_(other.value_), refcount_(other.refcount_) {
other.value_ = nullptr;
other.refcount_ = nullptr;
}
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
explicit Shared(SharedView<U> other);
template <typename U>
Shared(const Shared<U>& alias, T* ptr)
: value_(ptr), refcount_(alias.refcount_) {
common_internal::StrongRef(refcount_);
}
template <typename U>
Shared(Shared<U>&& alias, T* ptr) noexcept
: value_(ptr), refcount_(alias.refcount_) {
alias.value_ = nullptr;
alias.refcount_ = nullptr;
}
~Shared() { common_internal::StrongUnref(refcount_); }
Shared& operator=(const Shared& other) {
common_internal::StrongRef(other.refcount_);
common_internal::StrongUnref(refcount_);
value_ = other.value_;
refcount_ = other.refcount_;
return *this;
}
Shared& operator=(Shared&& other) noexcept {
common_internal::StrongUnref(refcount_);
value_ = other.value_;
refcount_ = other.refcount_;
other.value_ = nullptr;
other.refcount_ = nullptr;
return *this;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Shared& operator=(const Shared<U>& other) {
common_internal::StrongRef(other.refcount_);
common_internal::StrongUnref(refcount_);
value_ = other.value_;
refcount_ = other.refcount_;
return *this;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
Shared& operator=(Shared<U>&& other) noexcept {
common_internal::StrongUnref(refcount_);
value_ = other.value_;
refcount_ = other.refcount_;
other.value_ = nullptr;
other.refcount_ = nullptr;
return *this;
}
template <typename U = T, typename = std::enable_if_t<!std::is_void_v<U>>>
U& operator*() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(!IsEmpty());
return *value_;
}
absl::Nonnull<T*> operator->() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(!IsEmpty());
return value_;
}
explicit operator bool() const { return !IsEmpty(); }
friend constexpr void swap(Shared& lhs, Shared& rhs) noexcept {
using std::swap;
swap(lhs.value_, rhs.value_);
swap(lhs.refcount_, rhs.refcount_);
}
private:
template <typename U>
friend class Shared;
template <typename U>
friend class SharedView;
template <typename To, typename From>
friend Shared<To> StaticCast(Shared<From>&& from);
template <typename U>
friend U* common_internal::GetPointer(const Shared<U>& shared);
template <typename U>
friend const common_internal::ReferenceCount*
common_internal::GetReferenceCount(const Shared<U>& shared);
template <typename U>
friend Shared<U> common_internal::MakeShared(
common_internal::AdoptRef, U* value,
const common_internal::ReferenceCount* refcount);
Shared(common_internal::AdoptRef, T* value,
const common_internal::ReferenceCount* refcount) noexcept
: value_(value), refcount_(refcount) {}
Shared(T* value, const common_internal::ReferenceCount* refcount) noexcept
: value_(value), refcount_(refcount) {
common_internal::StrongRef(refcount_);
}
bool IsEmpty() const noexcept { return value_ == nullptr; }
T* value_ = nullptr;
const common_internal::ReferenceCount* refcount_ = nullptr;
};
template <typename To, typename From>
inline Shared<To> StaticCast(const Shared<From>& from) {
return common_internal::MakeShared(
static_cast<To*>(common_internal::GetPointer(from)),
common_internal::GetReferenceCount(from));
}
template <typename To, typename From>
inline Shared<To> StaticCast(Shared<From>&& from) {
To* value = static_cast<To*>(from.value_);
const auto* refcount = from.refcount_;
from.value_ = nullptr;
from.refcount_ = nullptr;
return Shared<To>(common_internal::kAdoptRef, value, refcount);
}
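// StaticCast converts between related Shared element types without touching
// the pointee (e.g. StaticCast<void>(object) in the tests); the rvalue
// overload steals the existing strong reference instead of re-reffing.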
template <typename T>
struct NativeTypeTraits<Shared<T>> final {
static bool SkipDestructor(const Shared<T>& shared) {
return common_internal::GetReferenceCount(shared) == nullptr;
}
};
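// SharedView is a borrowed, non-owning view of a Shared<T>, playing the same
// role for Shared that absl::string_view plays for std::string: trivially
// copyable, no refcount traffic, and it must not outlive the owning Shared.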
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI SharedView final {
public:
SharedView() = default;
SharedView(const SharedView&) = default;
SharedView& operator=(const SharedView&) = default;
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
SharedView(const SharedView<U>& other)
: value_(other.value_), refcount_(other.refcount_) {}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
SharedView(SharedView<U>&& other) noexcept
: value_(other.value_), refcount_(other.refcount_) {}
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
SharedView(const Shared<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
: value_(other.value_), refcount_(other.refcount_) {}
template <typename U>
SharedView(SharedView<U> alias, T* ptr)
: value_(ptr), refcount_(alias.refcount_) {}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
SharedView& operator=(const SharedView<U>& other) {
value_ = other.value_;
refcount_ = other.refcount_;
return *this;
}
template <
typename U,
typename = std::enable_if_t<std::conjunction_v<
std::negation<std::is_same<U, T>>, std::is_convertible<U*, T*>>>>
SharedView& operator=(SharedView<U>&& other) noexcept {
value_ = other.value_;
refcount_ = other.refcount_;
return *this;
}
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
SharedView& operator=(
const Shared<U>& other ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
value_ = other.value_;
refcount_ = other.refcount_;
return *this;
}
template <typename U,
typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
SharedView& operator=(Shared<U>&&) = delete;
template <typename U = T, typename = std::enable_if_t<!std::is_void_v<U>>>
U& operator*() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(!IsEmpty());
return *value_;
}
absl::Nonnull<T*> operator->() const noexcept {
ABSL_DCHECK(!IsEmpty());
return value_;
}
explicit operator bool() const { return !IsEmpty(); }
friend constexpr void swap(SharedView& lhs, SharedView& rhs) noexcept {
using std::swap;
swap(lhs.value_, rhs.value_);
swap(lhs.refcount_, rhs.refcount_);
}
private:
template <typename U>
friend class Shared;
template <typename U>
friend class SharedView;
template <typename U>
friend U* common_internal::GetPointer(SharedView<U> shared);
template <typename U>
friend const common_internal::ReferenceCount*
common_internal::GetReferenceCount(SharedView<U> shared);
template <typename U>
friend SharedView<U> common_internal::MakeSharedView(
U* value, const common_internal::ReferenceCount* refcount);
SharedView(T* value, const common_internal::ReferenceCount* refcount)
: value_(value), refcount_(refcount) {}
bool IsEmpty() const noexcept { return value_ == nullptr; }
T* value_ = nullptr;
const common_internal::ReferenceCount* refcount_ = nullptr;
};
template <typename T>
template <typename U, typename>
Shared<T>::Shared(SharedView<U> other)
: value_(other.value_), refcount_(other.refcount_) {
StrongRef(refcount_);
}
template <typename To, typename From>
SharedView<To> StaticCast(SharedView<From> from) {
return common_internal::MakeSharedView(
static_cast<To*>(common_internal::GetPointer(from)),
common_internal::GetReferenceCount(from));
}
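// CRTP base mirroring std::enable_shared_from_this: deriving from it lets an
// object mint a Shared<T> to itself from whatever reference count was
// recorded for it at construction time.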
template <typename T>
struct EnableSharedFromThis
: public virtual common_internal::ReferenceCountFromThis {
protected:
Shared<T> shared_from_this() noexcept {
auto* const derived = static_cast<T*>(this);
auto* const refcount = common_internal::GetReferenceCountForThat(*this);
return common_internal::MakeShared(derived, refcount);
}
Shared<const T> shared_from_this() const noexcept {
auto* const derived = static_cast<const T*>(this);
auto* const refcount = common_internal::GetReferenceCountForThat(*this);
return common_internal::MakeShared(derived, refcount);
}
};
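// Backend for MemoryManagement::kReferenceCounting. MakeShared co-allocates
// the object with its reference count and adopts that reference; MakeUnique
// is plain operator new. Construction is private: only MemoryManager uses it.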
class ReferenceCountingMemoryManager final {
public:
ReferenceCountingMemoryManager(const ReferenceCountingMemoryManager&) =
delete;
ReferenceCountingMemoryManager(ReferenceCountingMemoryManager&&) = delete;
ReferenceCountingMemoryManager& operator=(
const ReferenceCountingMemoryManager&) = delete;
ReferenceCountingMemoryManager& operator=(ReferenceCountingMemoryManager&&) =
delete;
private:
template <typename T, typename... Args>
static ABSL_MUST_USE_RESULT Shared<T> MakeShared(Args&&... args) {
using U = std::remove_const_t<T>;
U* ptr;
common_internal::ReferenceCount* refcount;
std::tie(ptr, refcount) =
common_internal::MakeReferenceCount<U>(std::forward<Args>(args)...);
return common_internal::MakeShared(common_internal::kAdoptRef,
static_cast<T*>(ptr), refcount);
}
template <typename T, typename... Args>
static ABSL_MUST_USE_RESULT Unique<T> MakeUnique(Args&&... args) {
using U = std::remove_const_t<T>;
return Unique<T>(static_cast<T*>(new U(std::forward<Args>(args)...)),
nullptr);
}
static void* Allocate(size_t size, size_t alignment);
static bool Deallocate(void* ptr, size_t size, size_t alignment) noexcept;
explicit ReferenceCountingMemoryManager() = default;
friend class MemoryManager;
};
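// Backend for MemoryManagement::kPooling. MakeShared placement-constructs the
// object in arena storage and registers a destructor with the arena unless
// NativeType::SkipDestructor says it can be skipped; if the constructor
// throws, the object is cleaned up and the exception rethrown.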
class PoolingMemoryManager final {
public:
PoolingMemoryManager(const PoolingMemoryManager&) = delete;
PoolingMemoryManager(PoolingMemoryManager&&) = delete;
PoolingMemoryManager& operator=(const PoolingMemoryManager&) = delete;
PoolingMemoryManager& operator=(PoolingMemoryManager&&) = delete;
private:
template <typename T, typename... Args>
ABSL_MUST_USE_RESULT static Shared<T> MakeShared(google::protobuf::Arena* arena,
Args&&... args) {
using U = std::remove_const_t<T>;
U* ptr = nullptr;
void* addr = Allocate(arena, sizeof(U), alignof(U));
CEL_INTERNAL_TRY {
ptr = ::new (addr) U(std::forward<Args>(args)...);
if constexpr (!std::is_trivially_destructible_v<U>) {
if (!NativeType::SkipDestructor(*ptr)) {
CEL_INTERNAL_TRY {
OwnCustomDestructor(arena, ptr, &DefaultDestructor<U>);
}
CEL_INTERNAL_CATCH_ANY {
ptr->~U();
CEL_INTERNAL_RETHROW;
}
}
}
if constexpr (std::is_base_of_v<common_internal::ReferenceC | #include "common/memory.h"
#include <cstddef>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include "absl/base/config.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/absl_check.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/internal/reference_count.h"
#include "common/native_type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
#ifdef ABSL_HAVE_EXCEPTIONS
#include <stdexcept>
#endif
namespace cel {
namespace {
using testing::_;
using testing::IsFalse;
using testing::IsNull;
using testing::IsTrue;
using testing::NotNull;
using testing::TestParamInfo;
using testing::TestWithParam;
TEST(MemoryManagement, ostream) {
{
std::ostringstream out;
out << MemoryManagement::kPooling;
EXPECT_EQ(out.str(), "POOLING");
}
{
std::ostringstream out;
out << MemoryManagement::kReferenceCounting;
EXPECT_EQ(out.str(), "REFERENCE_COUNTING");
}
}
struct TrivialSmallObject {
uintptr_t ptr;
char padding[32 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialSmallSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialSmallObject>());
}
}
struct TrivialMediumObject {
uintptr_t ptr;
char padding[256 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialMediumSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialMediumObject>());
}
}
struct TrivialLargeObject {
uintptr_t ptr;
char padding[4096 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialLargeSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialLargeObject>());
}
}
TEST(RegionalMemoryManager, TrivialMixedSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
switch (i % 3) {
case 0:
static_cast<void>(memory_manager.MakeUnique<TrivialSmallObject>());
break;
case 1:
static_cast<void>(memory_manager.MakeUnique<TrivialMediumObject>());
break;
case 2:
static_cast<void>(memory_manager.MakeUnique<TrivialLargeObject>());
break;
}
}
}
struct TrivialHugeObject {
uintptr_t ptr;
char padding[32768 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialHugeSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialHugeObject>());
}
}
class SkippableDestructor {
public:
explicit SkippableDestructor(bool& deleted) : deleted_(deleted) {}
~SkippableDestructor() { deleted_ = true; }
private:
bool& deleted_;
};
}
template <>
struct NativeTypeTraits<SkippableDestructor> final {
static bool SkipDestructor(const SkippableDestructor&) { return true; }
};
namespace {
TEST(RegionalMemoryManager, SkippableDestructor) {
bool deleted = false;
{
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
auto shared = memory_manager.MakeShared<SkippableDestructor>(deleted);
static_cast<void>(shared);
}
EXPECT_FALSE(deleted);
}
class MemoryManagerTest : public TestWithParam<MemoryManagement> {
public:
void SetUp() override {}
void TearDown() override { Finish(); }
void Finish() { arena_.reset(); }
MemoryManagerRef memory_manager() {
switch (memory_management()) {
case MemoryManagement::kReferenceCounting:
return MemoryManager::ReferenceCounting();
case MemoryManagement::kPooling:
if (!arena_) {
arena_.emplace();
}
return MemoryManager::Pooling(&*arena_);
}
}
MemoryManagement memory_management() const { return GetParam(); }
static std::string ToString(TestParamInfo<MemoryManagement> param) {
std::ostringstream out;
out << param.param;
return out.str();
}
private:
absl::optional<google::protobuf::Arena> arena_;
};
TEST_P(MemoryManagerTest, AllocateAndDeallocateZeroSize) {
EXPECT_THAT(memory_manager().Allocate(0, 1), IsNull());
EXPECT_THAT(memory_manager().Deallocate(nullptr, 0, 1), IsFalse());
}
TEST_P(MemoryManagerTest, AllocateAndDeallocateBadAlignment) {
EXPECT_DEBUG_DEATH(absl::IgnoreLeak(memory_manager().Allocate(1, 0)), _);
EXPECT_DEBUG_DEATH(memory_manager().Deallocate(nullptr, 0, 0), _);
}
TEST_P(MemoryManagerTest, AllocateAndDeallocate) {
constexpr size_t kSize = 1024;
constexpr size_t kAlignment = __STDCPP_DEFAULT_NEW_ALIGNMENT__;
void* ptr = memory_manager().Allocate(kSize, kAlignment);
ASSERT_THAT(ptr, NotNull());
if (memory_management() == MemoryManagement::kReferenceCounting) {
EXPECT_THAT(memory_manager().Deallocate(ptr, kSize, kAlignment), IsTrue());
}
}
TEST_P(MemoryManagerTest, AllocateAndDeallocateOveraligned) {
constexpr size_t kSize = 1024;
constexpr size_t kAlignment = __STDCPP_DEFAULT_NEW_ALIGNMENT__ * 4;
void* ptr = memory_manager().Allocate(kSize, kAlignment);
ASSERT_THAT(ptr, NotNull());
if (memory_management() == MemoryManagement::kReferenceCounting) {
EXPECT_THAT(memory_manager().Deallocate(ptr, kSize, kAlignment), IsTrue());
}
}
class Object {
public:
Object() : deleted_(nullptr) {}
explicit Object(bool& deleted) : deleted_(&deleted) {}
~Object() {
if (deleted_ != nullptr) {
ABSL_CHECK(!*deleted_);
*deleted_ = true;
}
}
int member = 0;
private:
bool* deleted_;
};
class Subobject : public Object {
public:
using Object::Object;
};
TEST_P(MemoryManagerTest, Shared) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedAliasCopy) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = Shared<int>(object, &object->member);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedAliasMove) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = Shared<int>(std::move(object), &object->member);
EXPECT_FALSE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
}
Finish();
}
TEST_P(MemoryManagerTest, SharedStaticCastCopy) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = StaticCast<void>(object);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedStaticCastMove) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = StaticCast<void>(std::move(object));
EXPECT_FALSE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyConstruct) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> copied_object(object);
EXPECT_TRUE(copied_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveConstruct) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyAssign) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
object = moved_object;
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveAssign) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
object = std::move(moved_object);
EXPECT_FALSE(moved_object);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyConstructConvertible) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(object);
Shared<Object> copied_object(object);
EXPECT_TRUE(copied_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveConstructConvertible) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyAssignConvertible) {
bool deleted = false;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(subobject);
auto object = memory_manager().MakeShared<Object>();
EXPECT_TRUE(object);
object = subobject;
EXPECT_TRUE(object);
EXPECT_TRUE(subobject);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveAssignConvertible) {
bool deleted = false;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(subobject);
auto object = memory_manager().MakeShared<Object>();
EXPECT_TRUE(object);
object = std::move(subobject);
EXPECT_TRUE(object);
EXPECT_FALSE(subobject);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedSwap) {
using std::swap;
auto object1 = memory_manager().MakeShared<Object>();
auto object2 = memory_manager().MakeShared<Object>();
auto* const object1_ptr = object1.operator->();
auto* const object2_ptr = object2.operator->();
swap(object1, object2);
EXPECT_EQ(object1.operator->(), object2_ptr);
EXPECT_EQ(object2.operator->(), object1_ptr);
}
TEST_P(MemoryManagerTest, SharedPointee) {
using std::swap;
auto object = memory_manager().MakeShared<Object>();
EXPECT_EQ(std::addressof(*object), object.operator->());
}
TEST_P(MemoryManagerTest, SharedViewConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
dangling_object_view.emplace(object);
EXPECT_TRUE(*dangling_object_view);
{
auto copied_object = Shared<Object>(*dangling_object_view);
EXPECT_FALSE(deleted);
}
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view = SharedView<Object>(object);
SharedView<Object> copied_object_view(object_view);
dangling_object_view.emplace(copied_object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view = SharedView<Object>(object);
SharedView<Object> moved_object_view(std::move(object_view));
dangling_object_view.emplace(moved_object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyAssign) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view1 = SharedView<Object>(object);
SharedView<Object> object_view2(object);
object_view1 = object_view2;
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveAssign) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view1 = SharedView<Object>(object);
SharedView<Object> object_view2(object);
object_view1 = std::move(object_view2);
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyConstructConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto subobject_view = SharedView<Subobject>(subobject);
SharedView<Object> object_view(subobject_view);
dangling_object_view.emplace(object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveConstructConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto subobject_view = SharedView<Subobject>(subobject);
SharedView<Object> object_view(std::move(subobject_view));
dangling_object_view.emplace(object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyAssignConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto object_view1 = SharedView<Object>(subobject);
SharedView<Subobject> subobject_view2(subobject);
object_view1 = subobject_view2;
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveAssignConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto object_view1 = SharedView<Object>(subobject);
SharedView<Subobject> subobject_view2(subobject);
object_view1 = std::move(subobject_view2);
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewSwap) {
using std::swap;
auto object1 = memory_manager().MakeShared<Object>();
auto object2 = memory_manager().MakeShared<Object>();
auto object1_view = SharedView<Object>(object1);
auto object2_view = SharedView<Object>(object2);
swap(object1_view, object2_view);
EXPECT_EQ(object1_view.operator->(), object2.operator->());
EXPECT_EQ(object2_view.operator->(), object1.operator->());
}
TEST_P(MemoryManagerTest, SharedViewPointee) {
using std::swap;
auto object = memory_manager().MakeShared<Object>();
auto object_view = SharedView<Object>(object);
EXPECT_EQ(std::addressof(*object_view), object_view.operator->());
}
TEST_P(MemoryManagerTest, Unique) {
bool deleted = false;
{
auto object = memory_manager().MakeUnique<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
EXPECT_TRUE(deleted);
Finish();
}
TEST_P(MemoryManagerTest, UniquePointee) {
using std::swap;
auto object = memory_manager().MakeUnique<Object>();
EXPECT_EQ(std::addressof(*object), object.operator->());
}
TEST_P(MemoryManagerTest, UniqueSwap) {
using std::swap;
auto object1 = memory_manager().MakeUnique<Object>();
auto object2 = memory_manager().MakeUnique<Object>();
auto* const object1_ptr = object1.operator->();
auto* const object2_ptr = object2.operator->();
swap(object1, object2);
EXPECT_EQ(object1.operator->(), object2_ptr);
EXPECT_EQ(object2.operator->(), object1_ptr);
}
struct EnabledObject : EnableSharedFromThis<EnabledObject> {
Shared<EnabledObject> This() { return shared_from_this(); }
Shared<const EnabledObject> This() const { return shared_from_this(); }
};
TEST_P(MemoryManagerTest, EnableSharedFromThis) {
{
auto object = memory_manager().MakeShared<EnabledObject>();
auto this_object = object->This();
EXPECT_EQ(this_object.operator->(), object.operator->());
}
{
auto object = memory_manager().MakeShared<const EnabledObject>();
auto this_object = object->This();
EXPECT_EQ(this_object.operator->(), object.operator->());
}
Finish();
}
struct ThrowingConstructorObject {
ThrowingConstructorObject() {
#ifdef ABSL_HAVE_EXCEPTIONS
throw std::invalid_argument("ThrowingConstructorObject");
#endif
}
char padding[64];
};
TEST_P(MemoryManagerTest, SharedThrowingConstructor) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(static_cast<void>(
memory_manager().MakeShared<ThrowingConstructorObject>()),
std::invalid_argument);
#else
GTEST_SKIP();
#endif
}
TEST_P(MemoryManagerTest, UniqueThrowingConstructor) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(static_cast<void>(
memory_manager().MakeUnique<ThrowingConstructorObject>()),
std::invalid_argument);
#else
GTEST_SKIP();
#endif
}
INSTANTIATE_TEST_SUITE_P(
MemoryManagerTest, MemoryManagerTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
MemoryManagerTest::ToString);
TEST(Owner, None) {
EXPECT_THAT(Owner::None(), IsFalse());
EXPECT_THAT(Owner::None().arena(), IsNull());
}
TEST(Owner, Allocator) {
google::protobuf::Arena arena;
EXPECT_THAT(Owner::Allocator(NewDeleteAllocator()), IsFalse());
EXPECT_THAT(Owner::Allocator(ArenaAllocator(&arena)), IsTrue());
}
TEST(Owner, Arena) {
google::protobuf::Arena arena;
EXPECT_THAT(Owner::Arena(&arena), IsTrue());
EXPECT_EQ(Owner::Arena(&arena).arena(), &arena);
}
TEST(Owner, ReferenceCount) {
auto* refcount = new common_internal::ReferenceCounted();
EXPECT_THAT(Owner::ReferenceCount(refcount), IsTrue());
EXPECT_THAT(Owner::ReferenceCount(refcount).arena(), IsNull());
common_internal::StrongUnref(refcount);
}
TEST(Owner, Equality) {
google::protobuf::Arena arena1;
google::protobuf::Arena arena2;
EXPECT_EQ(Owner::None(), Owner::None());
EXPECT_EQ(Owner::Allocator(NewDeleteAllocator()), Owner::None());
EXPECT_EQ(Owner::Arena(&arena1), Owner::Arena(&arena1));
EXPECT_NE(Owner::Arena(&arena1), Owner::None());
EXPECT_NE(Owner::None(), Owner::Arena(&arena1));
EXPECT_NE(Owner::Arena(&arena1), Owner::Arena(&arena2));
EXPECT_EQ(Owner::Allocator(ArenaAllocator(&arena1)), Owner::Arena(&arena1));
}
TEST(Borrower, None) {
EXPECT_THAT(Borrower::None(), IsFalse());
EXPECT_THAT(Borrower::None().arena(), IsNull());
}
TEST(Borrower, Allocator) {
google::protobuf::Arena arena;
EXPECT_THAT(Borrower::Allocator(NewDeleteAllocator()), IsFalse());
EXPECT_THAT(Borrower::Allocator(ArenaAllocator(&arena)), IsTrue());
}
TEST(Borrower, Arena) {
google::protobuf::Arena arena;
EXPECT_THAT(Borrower::Arena(&arena), IsTrue());
EXPECT_EQ(Borrower::Arena(&arena).arena(), &arena);
}
TEST(Borrower, ReferenceCount) {
auto* refcount = new common_internal::ReferenceCounted();
EXPECT_THAT(Borrower::ReferenceCount(refcount), IsTrue());
EXPECT_THAT(Borrower::ReferenceCount(refcount).arena(), IsNull());
common_internal::StrongUnref(refcount);
}
TEST(Borrower, Equality) {
google::protobuf::Arena arena1;
google::protobuf::Arena arena2;
EXPECT_EQ(Borrower::None(), Borrower::None());
EXPECT_EQ(Borrower::Allocator(NewDeleteAllocator()), Borrower::None());
EXPECT_EQ(Borrower::Arena(&arena1), Borrower::Arena(&arena1));
EXPECT_NE(Borrower::Arena(&arena1), Borrower::None());
EXPECT_NE(Borrower::None(), Borrower::Arena(&arena1));
EXPECT_NE(Borrower::Arena(&arena1), Borrower::Arena(&arena2));
EXPECT_EQ(Borrower::Allocator(ArenaAllocator(&arena1)),
Borrower::Arena(&arena1));
}
TEST(OwnerBorrower, CopyConstruct) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2(owner1);
Borrower borrower(owner1);
EXPECT_EQ(owner1, owner2);
EXPECT_EQ(owner1, borrower);
EXPECT_EQ(borrower, owner1);
}
TEST(OwnerBorrower, MoveConstruct) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2(std::move(owner1));
Borrower borrower(owner2);
EXPECT_EQ(owner2, borrower);
EXPECT_EQ(borrower, owner2);
}
TEST(OwnerBorrower, CopyAssign) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2;
owner2 = owner1;
Borrower borrower(owner1);
EXPECT_EQ(owner1, owner2);
EXPECT_EQ(owner1, borrower);
EXPECT_EQ(borrower, owner1);
}
TEST(OwnerBorrower, MoveAssign) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2;
owner2 = std::move(owner1);
Borrower borrower(owner2);
EXPECT_EQ(owner2, borrower);
EXPECT_EQ(borrower, owner2);
}
TEST(Unique, ToAddress) {
Unique<bool> unique;
EXPECT_EQ(cel::to_address(unique), nullptr);
unique = AllocateUnique<bool>(NewDeleteAllocator());
EXPECT_EQ(cel::to_address(unique), unique.operator->());
}
}
} | 10 |
#ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_UNKNOWN_FUNCTION_RESULT_SET_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_UNKNOWN_FUNCTION_RESULT_SET_H_
#include "base/function_result.h"
#include "base/function_result_set.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
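// Legacy aliases: the old eval/public unknown-function-result names appear to
// map directly onto the newer cel::FunctionResult types.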
using UnknownFunctionResult = ::cel::FunctionResult;
using UnknownFunctionResultSet = ::cel::FunctionResultSet;
}
}
}
}
#endif
#include "eval/public/unknown_function_result_set.h" | #include "eval/public/unknown_function_result_set.h"
#include <sys/ucontext.h>
#include <memory>
#include <string>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/empty.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/arena.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "internal/testing.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::google::protobuf::ListValue;
using ::google::protobuf::Struct;
using ::google::protobuf::Arena;
using testing::Eq;
using testing::SizeIs;
CelFunctionDescriptor kTwoInt("TwoInt", false,
{CelValue::Type::kInt64, CelValue::Type::kInt64});
CelFunctionDescriptor kOneInt("OneInt", false, {CelValue::Type::kInt64});
TEST(UnknownFunctionResult, Equals) {
UnknownFunctionResult call1(kTwoInt, 0);
UnknownFunctionResult call2(kTwoInt, 0);
EXPECT_TRUE(call1.IsEqualTo(call2));
UnknownFunctionResult call3(kOneInt, 0);
UnknownFunctionResult call4(kOneInt, 0);
EXPECT_TRUE(call3.IsEqualTo(call4));
UnknownFunctionResultSet call_set({call1, call3});
EXPECT_EQ(call_set.size(), 2);
EXPECT_EQ(*call_set.begin(), call3);
EXPECT_EQ(*(++call_set.begin()), call1);
}
TEST(UnknownFunctionResult, InequalDescriptor) {
UnknownFunctionResult call1(kTwoInt, 0);
UnknownFunctionResult call2(kOneInt, 0);
EXPECT_FALSE(call1.IsEqualTo(call2));
CelFunctionDescriptor one_uint("OneInt", false, {CelValue::Type::kUint64});
UnknownFunctionResult call3(kOneInt, 0);
UnknownFunctionResult call4(one_uint, 0);
EXPECT_FALSE(call3.IsEqualTo(call4));
UnknownFunctionResultSet call_set({call1, call3, call4});
EXPECT_EQ(call_set.size(), 3);
auto it = call_set.begin();
EXPECT_EQ(*it++, call3);
EXPECT_EQ(*it++, call4);
EXPECT_EQ(*it++, call1);
}
}
}
}
}
} | 91 |
#ifndef GLOG_INTERNAL_STACKTRACE_H
#define GLOG_INTERNAL_STACKTRACE_H
#include "glog/platform.h"
#if defined(GLOG_USE_GLOG_EXPORT)
# include "glog/export.h"
#endif
#if !defined(GLOG_NO_EXPORT)
# error "stacktrace.h" was not included correctly.
#endif
#include "config.h"
#if defined(HAVE_LIBUNWIND)
# define STACKTRACE_H "stacktrace_libunwind-inl.h"
#elif defined(HAVE_UNWIND)
# define STACKTRACE_H "stacktrace_unwind-inl.h"
#elif !defined(NO_FRAME_POINTER)
# if defined(__i386__) && __GNUC__ >= 2
# define STACKTRACE_H "stacktrace_x86-inl.h"
# elif (defined(__ppc__) || defined(__PPC__)) && __GNUC__ >= 2
# define STACKTRACE_H "stacktrace_powerpc-inl.h"
# elif defined(GLOG_OS_WINDOWS)
# define STACKTRACE_H "stacktrace_windows-inl.h"
# endif
#endif
#if !defined(STACKTRACE_H) && defined(HAVE_EXECINFO_BACKTRACE)
# define STACKTRACE_H "stacktrace_generic-inl.h"
#endif
#if defined(STACKTRACE_H)
# define HAVE_STACKTRACE
#endif
namespace google {
inline namespace glog_internal_namespace_ {
#if defined(HAVE_STACKTRACE)
GLOG_NO_EXPORT int GetStackTrace(void** result, int max_depth, int skip_count);
#endif
}
}
#endif
#include "stacktrace.h"
#if defined(STACKTRACE_H)
# include STACKTRACE_H
#endif | #include "stacktrace.h"
#include <cstdio>
#include <cstdlib>
#include "base/commandlineflags.h"
#include "config.h"
#include "glog/logging.h"
#include "utilities.h"
#ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
# include <execinfo.h>
#endif
#ifdef HAVE_STACKTRACE
const int BACKTRACE_STEPS = 6;
struct AddressRange {
const void *start, *end;
};
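// Expected [start, end) code ranges for each frame of the nested
// CheckStackTrace call chain; with GCC these are captured via the
// label-as-value extension so each return address reported by
// GetStackTrace() can be checked against the function that produced it.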
AddressRange expected_range[BACKTRACE_STEPS];
# if __GNUC__
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = &&start_label; \
(prange)->end = &&end_label; \
CHECK_LT((prange)->start, (prange)->end); \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
a_label: \
do { \
__asm__ __volatile__(""); \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
void* ra = __builtin_return_address(0); \
CHECK_LT((prange)->start, ra); \
if (ra > (prange)->end) { \
printf("Adjusting range from %p..%p to %p..%p\n", (prange)->start, \
(prange)->end, (prange)->start, ra); \
(prange)->end = ra; \
} \
} while (0)
# else
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = reinterpret_cast<const void*>(&fn); \
(prange)->end = reinterpret_cast<const char*>(&fn) + 256; \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
do { \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
} while (0)
# endif
static void CheckRetAddrIsInFunction(void* ret_addr,
const AddressRange& range) {
CHECK_GE(ret_addr, range.start);
CHECK_LE(ret_addr, range.end);
}
# if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-label-as-value"
# endif
void ATTRIBUTE_NOINLINE CheckStackTrace(int);
static void ATTRIBUTE_NOINLINE CheckStackTraceLeaf() {
const int STACK_LEN = 10;
void* stack[STACK_LEN];
int size;
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[1]);
INIT_ADDRESS_RANGE(CheckStackTraceLeaf, start, end, &expected_range[0]);
DECLARE_ADDRESS_LABEL(start);
size = google::GetStackTrace(stack, STACK_LEN, 0);
printf("Obtained %d stack frames.\n", size);
CHECK_GE(size, 1);
CHECK_LE(size, STACK_LEN);
if (true) {
# ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
char** strings = backtrace_symbols(stack, size);
printf("Obtained %d stack frames.\n", size);
for (int i = 0; i < size; i++) {
printf("%s %p\n", strings[i], stack[i]);
}
union {
void (*p1)(int);
void* p2;
} p = {&CheckStackTrace};
printf("CheckStackTrace() addr: %p\n", p.p2);
free(strings);
# endif
}
for (int i = 0; i < BACKTRACE_STEPS; i++) {
printf("Backtrace %d: expected: %p..%p actual: %p ... ", i,
expected_range[i].start, expected_range[i].end, stack[i]);
fflush(stdout);
CheckRetAddrIsInFunction(stack[i], expected_range[i]);
printf("OK\n");
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace4(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[2]);
INIT_ADDRESS_RANGE(CheckStackTrace4, start, end, &expected_range[1]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTraceLeaf();
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace3(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[3]);
INIT_ADDRESS_RANGE(CheckStackTrace3, start, end, &expected_range[2]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace4(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace2(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[4]);
INIT_ADDRESS_RANGE(CheckStackTrace2, start, end, &expected_range[3]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace3(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace1(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[5]);
INIT_ADDRESS_RANGE(CheckStackTrace1, start, end, &expected_range[4]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace2(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# ifndef __GNUC__
static
# endif
void ATTRIBUTE_NOINLINE
CheckStackTrace(int i) {
INIT_ADDRESS_RANGE(CheckStackTrace, start, end, &expected_range[5]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace1(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# if defined(__clang__)
# pragma clang diagnostic pop
# endif
int main(int, char** argv) {
FLAGS_logtostderr = true;
google::InitGoogleLogging(argv[0]);
CheckStackTrace(0);
printf("PASS\n");
return 0;
}
#else
int main() {
# ifdef GLOG_BAZEL_BUILD
printf("HAVE_STACKTRACE is expected to be defined in Bazel tests\n");
exit(EXIT_FAILURE);
# endif
printf("PASS (no stacktrace support)\n");
return 0;
}
#endif | 156 |
#include "phonenumbers/logger.h"
#include <cstddef>
namespace i18n {
namespace phonenumbers {
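// Process-wide logger override; remains null until set_logger_impl()
// installs one (as the test fixture below does).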
Logger* Logger::impl_ = NULL;
}
} | #include <string>
#include <gtest/gtest.h>
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/logger.h"
namespace i18n {
namespace phonenumbers {
class StringLogger : public Logger {
public:
virtual ~StringLogger() {}
const string& message() const {
return msg_;
}
virtual void WriteMessage(const string& msg) {
msg_ += msg;
}
private:
string msg_;
};
class LoggerTest : public ::testing::Test {
protected:
virtual void SetUp() {
test_logger_.reset(new StringLogger());
test_logger_->set_level(LOG_INFO);
old_logger_ = Logger::mutable_logger_impl();
Logger::set_logger_impl(test_logger_.get());
}
virtual void TearDown() {
Logger::set_logger_impl(old_logger_);
}
scoped_ptr<StringLogger> test_logger_;
Logger* old_logger_;
};
TEST_F(LoggerTest, LoggerIgnoresHigherVerbosity) {
LOG(LOG_DEBUG) << "Hello";
EXPECT_EQ("", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsNewline) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsEqualVerbosity) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsMoreSeriousMessages) {
LOG(LOG_WARNING) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerConcatenatesMessages) {
LOG(LOG_INFO) << "Hello";
ASSERT_EQ("Hello\n", test_logger_->message());
LOG(LOG_INFO) << " World";
EXPECT_EQ("Hello\n World\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerHandlesDifferentTypes) {
LOG(LOG_INFO) << "Hello " << 42;
EXPECT_EQ("Hello 42\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerIgnoresVerboseLogs) {
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("", test_logger_->message());
test_logger_->set_level(LOG_DEBUG);
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerShowsDebugLogsAtDebugLevel) {
test_logger_->set_level(LOG_DEBUG);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsDebugLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsErrorLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(ERROR) << "Error hello";
EXPECT_EQ("Error hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsLogsAccordingToVerbosity) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
VLOG(verbose_log_level + 1) << "Hello 3";
EXPECT_EQ("", test_logger_->message());
VLOG(verbose_log_level - 1) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
VLOG(verbose_log_level) << "Hello 2";
EXPECT_EQ("Hello\nHello 2\n", test_logger_->message());
}
}
} | 164 |
#ifndef I18N_PHONENUMBERS_GEOCODING_DATA
#define I18N_PHONENUMBERS_GEOCODING_DATA
#include <cstdint>
namespace i18n {
namespace phonenumbers {
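// Read-only tables backing offline geocoding. available_languages and
// prefixes are stored in ascending order (the tests below verify the
// ordering, which presumably enables binary search at lookup time).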
struct CountryLanguages {
const char** available_languages;
const int available_languages_size;
};
struct PrefixDescriptions {
const int32_t* prefixes;
const int prefixes_size;
const char** descriptions;
const int32_t* possible_lengths;
const int possible_lengths_size;
};
const int* get_country_calling_codes();
int get_country_calling_codes_size();
const CountryLanguages* get_country_languages(int index);
const char** get_prefix_language_code_pairs();
int get_prefix_language_code_pairs_size();
const PrefixDescriptions* get_prefix_descriptions(int index);
}
}
#endif | #include <cmath>
#include <set>
#include <string>
#include <gtest/gtest.h>
#include "phonenumbers/base/basictypes.h"
#include "phonenumbers/geocoding/geocoding_data.h"
#include "phonenumbers/geocoding/geocoding_test_data.h"
#include "absl/container/btree_set.h"
namespace i18n {
namespace phonenumbers {
using std::set;
using std::string;
namespace {
typedef const CountryLanguages* (*country_languages_getter)(int index);
typedef const PrefixDescriptions* (*prefix_descriptions_getter)(int index);
void TestCountryLanguages(const CountryLanguages* languages) {
EXPECT_GT(languages->available_languages_size, 0);
for (int i = 0; i < languages->available_languages_size; ++i) {
string language(languages->available_languages[i]);
EXPECT_GT(language.size(), 0);
if (i > 0) {
EXPECT_LT(string(languages->available_languages[i - 1]),
language);
}
}
}
void TestCountryCallingCodeLanguages(
const int* country_calling_codes, int country_calling_codes_size,
country_languages_getter get_country_languages) {
EXPECT_GT(country_calling_codes_size, 0);
for (int i = 0; i < country_calling_codes_size; ++i) {
int code = country_calling_codes[i];
EXPECT_GT(code, 0);
if (i > 0) {
EXPECT_LT(country_calling_codes[i-1], code);
}
TestCountryLanguages(get_country_languages(i));
}
}
void TestPrefixDescriptions(const PrefixDescriptions* descriptions) {
EXPECT_GT(descriptions->prefixes_size, 0);
absl::btree_set<int> possible_lengths;
for (int i = 0; i < descriptions->prefixes_size; ++i) {
int prefix = descriptions->prefixes[i];
EXPECT_GT(prefix, 0);
if (i > 0) {
EXPECT_LT(descriptions->prefixes[i - 1], prefix);
}
possible_lengths.insert(log10(prefix) + 1);
}
EXPECT_GT(descriptions->possible_lengths_size, 0);
for (int i = 0; i < descriptions->possible_lengths_size; ++i) {
int possible_length = descriptions->possible_lengths[i];
EXPECT_GT(possible_length, 0);
if (i > 0) {
EXPECT_LT(descriptions->possible_lengths[i - 1], possible_length);
}
EXPECT_TRUE(
possible_lengths.find(possible_length) != possible_lengths.end());
}
}
void TestAllPrefixDescriptions(
const char** prefix_language_code_pairs,
int prefix_language_code_pairs_size,
prefix_descriptions_getter get_prefix_descriptions) {
EXPECT_GT(prefix_language_code_pairs_size, 0);
for (int i = 0; i < prefix_language_code_pairs_size; ++i) {
string language_code_pair(prefix_language_code_pairs[i]);
EXPECT_GT(language_code_pair.size(), 0);
if (i > 0) {
EXPECT_LT(string(prefix_language_code_pairs[i - 1]),
language_code_pair);
}
TestPrefixDescriptions(get_prefix_descriptions(i));
}
}
}
TEST(GeocodingDataTest, TestCountryCallingCodeLanguages) {
TestCountryCallingCodeLanguages(get_country_calling_codes(),
get_country_calling_codes_size(),
get_country_languages);
}
TEST(GeocodingDataTest, TestTestCountryCallingCodeLanguages) {
TestCountryCallingCodeLanguages(get_test_country_calling_codes(),
get_test_country_calling_codes_size(),
get_test_country_languages);
}
TEST(GeocodingDataTest, TestPrefixDescriptions) {
TestAllPrefixDescriptions(get_prefix_language_code_pairs(),
get_prefix_language_code_pairs_size(),
get_prefix_descriptions);
}
TEST(GeocodingDataTest, TestTestPrefixDescriptions) {
TestAllPrefixDescriptions(get_test_prefix_language_code_pairs(),
get_test_prefix_language_code_pairs_size(),
get_test_prefix_descriptions);
}
TEST(GeocodingDataTest, TestTestGeocodingData) {
ASSERT_EQ(3, get_test_country_calling_codes_size());
const int* country_calling_codes = get_test_country_calling_codes();
const int expected_calling_codes[] = {1, 54, 82};
for (int i = 0; i < get_test_country_calling_codes_size(); ++i) {
EXPECT_EQ(expected_calling_codes[i], country_calling_codes[i]);
}
const CountryLanguages* langs_1 = get_test_country_languages(0);
ASSERT_EQ(2, langs_1->available_languages_size);
const char* expected_languages[] = {"de", "en"};
for (int i = 0; i < langs_1->available_languages_size; ++i) {
EXPECT_STREQ(expected_languages[i], langs_1->available_languages[i]);
}
ASSERT_EQ(5, get_test_prefix_language_code_pairs_size());
const char** language_code_pairs = get_test_prefix_language_code_pairs();
const char* expected_language_code_pairs[] = {
"1_de", "1_en", "54_en", "82_en", "82_ko",
};
for (int i = 0; i < get_test_prefix_language_code_pairs_size(); ++i) {
EXPECT_STREQ(expected_language_code_pairs[i], language_code_pairs[i]);
}
const PrefixDescriptions* desc_1_de = get_test_prefix_descriptions(0);
ASSERT_EQ(2, desc_1_de->prefixes_size);
const int32 expected_prefixes[] = {1201, 1650};
const char* expected_descriptions[] = {
"New Jersey",
"Kalifornien",
};
for (int i = 0; i < desc_1_de->prefixes_size; ++i) {
EXPECT_EQ(expected_prefixes[i], desc_1_de->prefixes[i]);
EXPECT_STREQ(expected_descriptions[i], desc_1_de->descriptions[i]);
}
ASSERT_EQ(1, desc_1_de->possible_lengths_size);
const int expected_lengths[] = {4};
for (int i = 0; i < desc_1_de->possible_lengths_size; ++i) {
EXPECT_EQ(expected_lengths[i], desc_1_de->possible_lengths[i]);
}
}
}
} | 172 |
#ifndef I18N_ADDRESSINPUT_REGION_DATA_CONSTANTS_H_
#define I18N_ADDRESSINPUT_REGION_DATA_CONSTANTS_H_
#include <cstddef>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
class RegionDataConstants {
public:
static bool IsSupported(const std::string& region_code);
static const std::vector<std::string>& GetRegionCodes();
static std::string GetRegionData(const std::string& region_code);
static const std::string& GetDefaultRegionData();
static size_t GetMaxLookupKeyDepth(const std::string& region_code);
RegionDataConstants(const RegionDataConstants&) = delete;
RegionDataConstants& operator=(const RegionDataConstants&) = delete;
};
}
}
#endif
#include "region_data_constants.h"
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include "address_field_util.h"
#include "format_element.h"
#include "lookup_key.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
struct RegionData {
const char* const region_code;
const char* const data;
};
const RegionData kRegionData[] = {
{"AC", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"ASCN 1ZZ",)"
R"("languages":"en")"
"}"},
{"AD", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"AD100,AD501,AD700",)"
R"("posturl":"http:
R"("languages":"ca")"
"}"},
{"AE", "{"
R"("fmt":"%N%n%O%n%A%n%S",)"
R"("lfmt":"%N%n%O%n%A%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"emirate",)"
R"("languages":"ar")"
"}"},
{"AF", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"1001,2601,3801",)"
R"("languages":"fa~ps~uz-Arab~tk")"
"}"},
{"AG", "{"
R"("require":"A",)"
R"("languages":"en")"
"}"},
{"AI", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"2640",)"
R"("languages":"en")"
"}"},
{"AL", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"1001,1017,3501",)"
R"("languages":"sq")"
"}"},
{"AM", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C%n%S",)"
R"("lfmt":"%N%n%O%n%A%n%Z%n%C%n%S",)"
R"("zipex":"375010,0002,0010",)"
R"("languages":"hy")"
"}"},
{"AO", "{"
R"("languages":"pt")"
"}"},
{"AQ", "{"
"}"},
{"AR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("zipex":"C1070AAM,C1000WAM,B1000TBU,X5187XAB",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"AS", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96799",)"
R"("posturl":"http:
R"("languages":"sm~en")"
"}"},
{"AT", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"1010,3741",)"
R"("posturl":"http:
R"("languages":"de~hr~sl~hu")"
"}"},
{"AU", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"state",)"
R"("locality_name_type":"suburb",)"
R"("zipex":"2060,3171,6430,4000,4006,3001",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"AW", "{"
R"("languages":"nl~pap")"
"}"},
{"AX", "{"
R"("fmt":"%O%n%N%n%A%nAX-%Z %C%nÅLAND",)"
R"("require":"ACZ",)"
R"("zipex":"22150,22550,22240,22710,22270,22730,22430",)"
R"("posturl":"https:
R"("languages":"sv")"
"}"},
{"AZ", "{"
R"("fmt":"%N%n%O%n%A%nAZ %Z %C",)"
R"("zipex":"1000",)"
R"("languages":"az~az-Cyrl")"
"}"},
{"BA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"71000",)"
R"("languages":"bs~bs-Cyrl~hr~sr~sr-Latn")"
"}"},
{"BB", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S %Z",)"
R"("state_name_type":"parish",)"
R"("zipex":"BB23026,BB22025",)"
R"("languages":"en")"
"}"},
{"BD", "{"
R"("fmt":"%N%n%O%n%A%n%C - %Z",)"
R"("zipex":"1340,1000",)"
R"("posturl":"https:
R"("languages":"bn")"
"}"},
{"BE", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"4000,1000",)"
R"("posturl":"https:
R"("languages":"nl~fr~de")"
"}"},
{"BF", "{"
R"("fmt":"%N%n%O%n%A%n%C %X",)"
R"("languages":"fr")"
"}"},
{"BG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000,1700",)"
R"("posturl":"http:
R"("languages":"bg")"
"}"},
{"BH", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"317",)"
R"("languages":"ar")"
"}"},
{"BI", "{"
R"("languages":"rn~fr~en")"
"}"},
{"BJ", "{"
R"("languages":"fr")"
"}"},
{"BL", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"BM", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"FL 07,HM GX,HM 12",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"BN", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"BT2328,KA1131,BA1511",)"
R"("posturl":"http:
R"("languages":"ms~ms-Arab")"
"}"},
{"BO", "{"
R"("languages":"es~qu~ay")"
"}"},
{"BQ", "{"
R"("languages":"nl")"
"}"},
{"BR", "{"
R"("fmt":"%O%n%N%n%A%n%D%n%C-%S%n%Z",)"
R"("require":"ASCZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"40301-110,70002-900",)"
R"("posturl":"http:
R"("languages":"pt")"
"}"},
{"BS", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S",)"
R"("state_name_type":"island",)"
R"("languages":"en")"
"}"},
{"BT", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"11001,31101,35003",)"
R"("posturl":"https:
R"("languages":"dz")"
"}"},
{"BV", "{"
"}"},
{"BW", "{"
R"("languages":"en~tn")"
"}"},
{"BY", "{"
R"("fmt":"%O%n%N%n%A%n%Z, %C%n%S",)"
R"("zipex":"223016,225860,220050",)"
R"("posturl":"http:
R"("languages":"be~ru")"
"}"},
{"BZ", "{"
R"("languages":"en")"
"}"},
{"CA", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zipex":"H3Z 2Y7,V8X 3X4,T0L 1K0,T0H 1A0,K1A 0B1",)"
R"("posturl":"https:
R"("languages":"en~fr")"
"}"},
{"CC", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"6799",)"
R"("languages":"en")"
"}"},
{"CD", "{"
R"("languages":"sw~lua~fr~ln~kg")"
"}"},
{"CF", "{"
R"("languages":"fr~sg")"
"}"},
{"CG", "{"
R"("languages":"fr")"
"}"},
{"CH", "{"
R"("fmt":"%O%n%N%n%A%nCH-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"2544,1211,1556,3030",)"
R"("posturl":"http:
R"("languages":"de~gsw~fr~it~rm")"
"}"},
{"CI", "{"
R"("fmt":"%N%n%O%n%X %A %C %X",)"
R"("languages":"fr")"
"}"},
{"CK", "{"
R"("languages":"en")"
"}"},
{"CL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("zipex":"8340457,8720019,1230000,8329100",)"
R"("languages":"es")"
"}"},
{"CM", "{"
R"("languages":"fr~en")"
"}"},
{"CN", "{"
R"("fmt":"%Z%n%S%C%D%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%D%n%C%n%S, %Z",)"
R"("require":"ACSZ",)"
R"("sublocality_name_type":"district",)"
R"("zipex":"266033,317204,100096,100808",)"
R"("posturl":"http:
R"("languages":"zh")"
"}"},
{"CO", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C, %S, %Z",)"
R"("require":"AS",)"
R"("state_name_type":"department",)"
R"("zipex":"111221,130001,760011",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"CR", "{"
R"("fmt":"%N%n%O%n%A%n%S, %C%n%Z",)"
R"("require":"ACS",)"
R"("zipex":"1000,2010,1001",)"
R"("posturl":"https:
R"("languages":"es")"
"}"},
{"CU", "{"
R"("fmt":"%N%n%O%n%A%n%C %S%n%Z",)"
R"("zipex":"10700",)"
R"("languages":"es")"
"}"},
{"CV", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%n%S",)"
R"("state_name_type":"island",)"
R"("zipex":"7600",)"
R"("languages":"pt")"
"}"},
{"CW", "{"
R"("languages":"pap~nl")"
"}"},
{"CX", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"6798",)"
R"("languages":"en")"
"}"},
{"CY", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"2008,3304,1900",)"
R"("languages":"el~tr")"
"}"},
{"CZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"100 00,251 66,530 87,110 00,225 99",)"
R"("posturl":"http:
R"("languages":"cs")"
"}"},
{"DE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"26133,53225",)"
R"("posturl":"http:
R"("languages":"de~frr")"
"}"},
{"DJ", "{"
R"("languages":"ar~fr")"
"}"},
{"DK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"8660,1566",)"
R"("posturl":"http:
R"("languages":"da~de~kl")"
"}"},
{"DM", "{"
R"("languages":"en")"
"}"},
{"DO", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"11903,10101",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"DZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"40304,16027",)"
R"("languages":"ar~fr")"
"}"},
{"EC", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"090105,092301",)"
R"("posturl":"http:
R"("languages":"es~qu")"
"}"},
{"EE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACZ",)"
R"("zipex":"69501,11212",)"
R"("posturl":"https:
R"("languages":"et")"
"}"},
{"EG", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S%n%Z",)"
R"("zipex":"4460232,5734356",)"
R"("languages":"ar")"
"}"},
{"EH", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"70000,72000",)"
R"("languages":"ar")"
"}"},
{"ER", "{"
R"("languages":"ti~en~ar")"
"}"},
{"ES", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("zipex":"28039,28300,28070",)"
R"("posturl":"http:
R"("languages":"es~ca~gl~eu")"
"}"},
{"ET", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000",)"
R"("languages":"am")"
"}"},
{"FI", "{"
R"("fmt":"%O%n%N%n%A%nFI-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"00550,00011",)"
R"("posturl":"https:
R"("languages":"fi~sv~sms")"
"}"},
{"FJ", "{"
R"("languages":"en~hif~fj")"
"}"},
{"FK", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"FIQQ 1ZZ",)"
R"("languages":"en")"
"}"},
{"FM", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96941,96944",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"FO", "{"
R"("fmt":"%N%n%O%n%A%nFO%Z %C",)"
R"("zipex":"100",)"
R"("posturl":"https:
R"("languages":"fo")"
"}"},
{"FR", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"33380,34092,33506",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GA", "{"
R"("languages":"fr")"
"}"},
{"GB", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"EC1Y 8SY,GIR 0AA,M2 5BQ,M34 4AB,CR0 2YR,DN16 9AA,W1A 4ZZ,EC1A 1HQ,OX14 4PG,BS18 8HF,NR25 7HG,RH6 0NP,BH23 6AA,B6 5BA,SO23 9AP,PO1 3AX,BFPO 61",)"
R"("posturl":"http:
R"("languages":"en~cy~ga~gd")"
"}"},
{"GD", "{"
R"("languages":"en")"
"}"},
{"GE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"0101",)"
R"("posturl":"http:
R"("languages":"ka~ab~os")"
"}"},
{"GF", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97300",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GG", "{"
R"("fmt":"%N%n%O%n%A%n%C%nGUERNSEY%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"GY1 1AA,GY2 2BT",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"GH", "{"
R"("languages":"ak~en~ee~gaa")"
"}"},
{"GI", "{"
R"("fmt":"%N%n%O%n%A%nGIBRALTAR%n%Z",)"
R"("require":"A",)"
R"("zipex":"GX11 1AA",)"
R"("languages":"en")"
"}"},
{"GL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"3900,3950,3911",)"
R"("languages":"kl")"
"}"},
{"GM", "{"
R"("languages":"en")"
"}"},
{"GN", "{"
R"("fmt":"%N%n%O%n%Z %A %C",)"
R"("zipex":"001,200,100",)"
R"("languages":"fr")"
"}"},
{"GP", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"GQ", "{"
R"("languages":"es~fr~pt")"
"}"},
{"GR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"151 24,151 10,101 88",)"
R"("posturl":"https:
R"("languages":"el")"
"}"},
{"GS", "{"
R"("fmt":"%N%n%O%n%A%n%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"SIQQ 1ZZ")"
"}"},
{"GT", "{"
R"("fmt":"%N%n%O%n%A%n%Z- %C",)"
R"("zipex":"09001,01501",)"
R"("languages":"es~quc")"
"}"},
{"GU", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("require":"ACZ",)"
R"("zip_name_type":"zip",)"
R"("zipex":"96910,96931",)"
R"("posturl":"http:
R"("languages":"en~ch")"
"}"},
{"GW", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000,1011",)"
R"("languages":"pt")"
"}"},
{"GY", "{"
R"("languages":"en")"
"}"},
{"HK", "{"
R"("fmt":"%S%n%C%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"area",)"
R"("locality_name_type":"district",)"
R"("languages":"zh-Hant~en")"
"}"},
{"HM", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"7050")"
"}"},
{"HN", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S%n%Z",)"
R"("require":"ACS",)"
R"("state_name_type":"department",)"
R"("zipex":"31301",)"
R"("languages":"es")"
"}"},
{"HR", "{"
R"("fmt":"%N%n%O%n%A%nHR-%Z %C",)"
R"("zipex":"10000,21001,10002",)"
R"("posturl":"http:
R"("languages":"hr~it~vec")"
"}"},
{"HT", "{"
R"("fmt":"%N%n%O%n%A%nHT%Z %C",)"
R"("zipex":"6120,5310,6110,8510",)"
R"("languages":"ht~fr")"
"}"},
{"HU", "{"
R"("fmt":"%N%n%O%n%C%n%A%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"1037,2380,1540",)"
R"("posturl":"http:
R"("languages":"hu")"
"}"},
{"ID", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("require":"AS",)"
R"("zipex":"40115",)"
R"("languages":"id")"
"}"},
{"IE", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C%n%S%n%Z",)"
R"("zip_name_type":"eircode",)"
R"("state_name_type":"county",)"
R"("sublocality_name_type":"townland",)"
R"("zipex":"A65 F4E2",)"
R"("posturl":"https:
R"("languages":"en")"
"}"},
{"IL", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"9614303",)"
R"("posturl":"http:
R"("languages":"iw~ar")"
"}"},
{"IM", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"IM2 1AA,IM99 1PS",)"
R"("posturl":"https:
R"("languages":"en~gv")"
"}"},
{"IN", "{"
R"("fmt":"%N%n%O%n%A%n%T%n%F%n%L%n%C %Z%n%S",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"pin",)"
R"("state_name_type":"state",)"
R"("zipex":"110034,110001",)"
R"("posturl":"https:
R"("languages":"en~hi")"
"}"},
{"IO", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"BBND 1ZZ",)"
R"("languages":"en")"
"}"},
{"IQ", "{"
R"("fmt":"%O%n%N%n%A%n%C, %S%n%Z",)"
R"("require":"ACS",)"
R"("zipex":"31001",)"
R"("languages":"ar")"
"}"},
{"IR", "{"
R"("fmt":"%O%n%N%n%S%n%C, %D%n%A%n%Z",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"11936-12345",)"
R"("languages":"fa")"
"}"},
{"IS", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"320,121,220,110",)"
R"("posturl":"https:
R"("languages":"is")"
"}"},
{"IT", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("zipex":"00144,47037,39049",)"
R"("posturl":"http:
R"("languages":"it")"
"}"},
{"JE", "{"
R"("fmt":"%N%n%O%n%A%n%C%nJERSEY%n%Z",)"
R"("require":"ACZ",)"
R"("zipex":"JE1 1AA,JE2 2BT",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"JM", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %X",)"
R"("require":"ACS",)"
R"("state_name_type":"parish",)"
R"("languages":"en")"
"}"},
{"JO", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"11937,11190",)"
R"("languages":"ar")"
"}"},
{"JP", "{"
R"("fmt":"〒%Z%n%S%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A, %S%n%Z",)"
R"("require":"ASZ",)"
R"("state_name_type":"prefecture",)"
R"("zipex":"154-0023,350-1106,951-8073,112-0001,208-0032,231-0012",)"
R"("posturl":"http:
R"("languages":"ja")"
"}"},
{"KE", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"20100,00100",)"
R"("languages":"sw~en")"
"}"},
{"KG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"720001",)"
R"("languages":"ky~ru")"
"}"},
{"KH", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"120101,120108",)"
R"("posturl":"https:
R"("languages":"km")"
"}"},
{"KI", "{"
R"("fmt":"%N%n%O%n%A%n%S%n%C",)"
R"("state_name_type":"island",)"
R"("languages":"en~gil")"
"}"},
{"KM", "{"
R"("languages":"ar~fr~zdj~wni")"
"}"},
{"KN", "{"
R"("fmt":"%N%n%O%n%A%n%C, %S",)"
R"("require":"ACS",)"
R"("state_name_type":"island",)"
R"("languages":"en")"
"}"},
{"KP", "{"
R"("fmt":"%Z%n%S%n%C%n%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A%n%C%n%S, %Z",)"
R"("languages":"ko")"
"}"},
{"KR", "{"
R"("fmt":"%S %C%D%n%A%n%O%n%N%n%Z",)"
R"("lfmt":"%N%n%O%n%A%n%D%n%C%n%S%n%Z",)"
R"("require":"ACSZ",)"
R"("state_name_type":"do_si",)"
R"("sublocality_name_type":"district",)"
R"("zipex":"03051",)"
R"("posturl":"http:
R"("languages":"ko")"
"}"},
{"KW", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"54541,54551,54404,13009",)"
R"("languages":"ar")"
"}"},
{"KY", "{"
R"("fmt":"%N%n%O%n%A%n%S %Z",)"
R"("require":"AS",)"
R"("state_name_type":"island",)"
R"("zipex":"KY1-1100,KY1-1702,KY2-2101",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"KZ", "{"
R"("fmt":"%Z%n%S%n%C%n%A%n%O%n%N",)"
R"("zipex":"040900,050012",)"
R"("languages":"kk~ru")"
"}"},
{"LA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"01160,01000",)"
R"("languages":"lo")"
"}"},
{"LB", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"2038 3054,1107 2810,1000",)"
R"("languages":"ar")"
"}"},
{"LC", "{"
R"("languages":"en")"
"}"},
{"LI", "{"
R"("fmt":"%O%n%N%n%A%nFL-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"9496,9491,9490,9485",)"
R"("posturl":"http:
R"("languages":"de~gsw")"
"}"},
{"LK", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"20000,00100",)"
R"("posturl":"http:
R"("languages":"si~ta")"
"}"},
{"LR", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1000",)"
R"("languages":"en")"
"}"},
{"LS", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"100",)"
R"("languages":"st~en")"
"}"},
{"LT", "{"
R"("fmt":"%O%n%N%n%A%nLT-%Z %C %S",)"
R"("require":"ACZ",)"
R"("zipex":"04340,03500",)"
R"("posturl":"http:
R"("languages":"lt")"
"}"},
{"LU", "{"
R"("fmt":"%O%n%N%n%A%nL-%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"4750,2998",)"
R"("posturl":"https:
R"("languages":"fr~lb~de")"
"}"},
{"LV", "{"
R"("fmt":"%N%n%O%n%A%n%S%n%C, %Z",)"
R"("require":"ACZ",)"
R"("zipex":"LV-1073,LV-1000",)"
R"("posturl":"https:
R"("languages":"lv")"
"}"},
{"LY", "{"
R"("languages":"ar")"
"}"},
{"MA", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"53000,10000,20050,16052",)"
R"("languages":"ar~fr~tzm")"
"}"},
{"MC", "{"
R"("fmt":"%N%n%O%n%A%nMC-%Z %C %X",)"
R"("zipex":"98000,98020,98011,98001",)"
R"("languages":"fr")"
"}"},
{"MD", "{"
R"("fmt":"%N%n%O%n%A%nMD-%Z %C",)"
R"("zipex":"2012,2019",)"
R"("languages":"ro")"
"}"},
{"ME", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"81257,81258,81217,84314,85366",)"
R"("languages":"sr-Latn")"
"}"},
{"MF", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97100",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"MG", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"501,101",)"
R"("languages":"mg~fr~en")"
"}"},
{"MH", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96960,96970",)"
R"("posturl":"http:
R"("languages":"en~mh")"
"}"},
{"MK", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"1314,1321,1443,1062",)"
R"("languages":"mk~sq")"
"}"},
{"ML", "{"
R"("languages":"fr")"
"}"},
{"MM", "{"
R"("fmt":"%N%n%O%n%A%n%C, %Z",)"
R"("zipex":"11181",)"
R"("languages":"my")"
"}"},
{"MN", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S %Z",)"
R"("zipex":"65030,65270",)"
R"("posturl":"http:
R"("languages":"mn")"
"}"},
{"MO", "{"
R"("fmt":"%A%n%O%n%N",)"
R"("lfmt":"%N%n%O%n%A",)"
R"("require":"A",)"
R"("languages":"zh-Hant~pt")"
"}"},
{"MP", "{"
R"("fmt":"%N%n%O%n%A%n%C %S %Z",)"
R"("require":"ACSZ",)"
R"("zip_name_type":"zip",)"
R"("state_name_type":"state",)"
R"("zipex":"96950,96951,96952",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"MQ", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97220",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"MR", "{"
R"("languages":"ar")"
"}"},
{"MS", "{"
R"("languages":"en")"
"}"},
{"MT", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"NXR 01,ZTN 05,GPO 01,BZN 1130,SPB 6031,VCT 1753",)"
R"("posturl":"https:
R"("languages":"mt~en")"
"}"},
{"MU", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"42602",)"
R"("languages":"en~fr")"
"}"},
{"MV", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"20026",)"
R"("posturl":"http:
R"("languages":"dv")"
"}"},
{"MW", "{"
R"("fmt":"%N%n%O%n%A%n%C %X",)"
R"("languages":"en~ny")"
"}"},
{"MX", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%Z %C, %S",)"
R"("require":"ACSZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zipex":"02860,77520,06082",)"
R"("posturl":"https:
R"("languages":"es")"
"}"},
{"MY", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%Z %C%n%S",)"
R"("require":"ACZ",)"
R"("state_name_type":"state",)"
R"("sublocality_name_type":"village_township",)"
R"("zipex":"43000,50754,88990,50670",)"
R"("posturl":"http:
R"("languages":"ms")"
"}"},
{"MZ", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C%S",)"
R"("zipex":"1102,1119,3212",)"
R"("languages":"pt")"
"}"},
{"NA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R"("zipex":"10001,10017",)"
R"("languages":"en")"
"}"},
{"NC", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"98814,98800,98810",)"
R"("posturl":"https:
R"("languages":"fr")"
"}"},
{"NE", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("zipex":"8001",)"
R"("languages":"fr")"
"}"},
{"NF", "{"
R"("fmt":"%O%n%N%n%A%n%C %S %Z",)"
R"("zipex":"2899",)"
R"("languages":"en")"
"}"},
{"NG", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C %Z%n%S",)"
R"("state_name_type":"state",)"
R"("zipex":"930283,300001,931104",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"NI", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C, %S",)"
R"("state_name_type":"department",)"
R"("zipex":"52000",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"NL", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"1234 AB,2490 AA",)"
R"("posturl":"http:
R"("languages":"nl~fy")"
"}"},
{"NO", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("locality_name_type":"post_town",)"
R"("zipex":"0025,0107,6631",)"
R"("posturl":"http:
R"("languages":"no~nn~se")"
"}"},
{"NP", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z",)"
R"("zipex":"44601",)"
R"("posturl":"http:
R"("languages":"ne")"
"}"},
{"NR", "{"
R"("fmt":"%N%n%O%n%A%n%S",)"
R"("require":"AS",)"
R"("state_name_type":"district",)"
R"("languages":"en")"
"}"},
{"NU", "{"
R"("languages":"en~niu")"
"}"},
{"NZ", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C %Z",)"
R"("require":"ACZ",)"
R"("zipex":"6001,6015,6332,8252,1030",)"
R"("posturl":"https:
R"("languages":"en~mi")"
"}"},
{"OM", "{"
R"("fmt":"%N%n%O%n%A%n%Z%n%C",)"
R"("zipex":"133,112,111",)"
R"("languages":"ar")"
"}"},
{"PA", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%S",)"
R"("languages":"es")"
"}"},
{"PE", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z%n%S",)"
R"("locality_name_type":"district",)"
R"("zipex":"LIMA 23,LIMA 42,CALLAO 2,02001",)"
R"("posturl":"http:
R"("languages":"es")"
"}"},
{"PF", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C %S",)"
R"("require":"ACSZ",)"
R"("state_name_type":"island",)"
R"("zipex":"98709",)"
R"("languages":"fr~ty")"
"}"},
{"PG", "{"
R"("fmt":"%N%n%O%n%A%n%C %Z %S",)"
R"("require":"ACS",)"
R"("zipex":"111",)"
R"("languages":"tpi~en~ho")"
"}"},
{"PH", "{"
R"("fmt":"%N%n%O%n%A%n%D, %C%n%Z %S",)"
R"("zipex":"1008,1050,1135,1207,2000,1000",)"
R"("posturl":"http:
R"("languages":"en")"
"}"},
{"PK", "{"
R"("fmt":"%N%n%O%n%A%n%D%n%C-%Z",)"
R"("zipex":"44000",)"
R"("posturl":"http:
R"("languages":"ur~en")"
"}"},
{"PL", "{"
R"("fmt":"%N%n%O%n%A%n%Z %C",)"
R"("require":"ACZ",)"
R"("zipex":"00-950,05-470,48-300,32-015,00-940",)"
R"("posturl":"http:
R"("languages":"pl~de~csb~lt")"
"}"},
{"PM", "{"
R"("fmt":"%O%n%N%n%A%n%Z %C %X",)"
R"("require":"ACZ",)"
R"("zipex":"97500",)"
R"("languages":"fr")"
"}"},
{"PN", "{"
R"("fmt":"%N%n%O%n%A%n%C%n%Z",)"
R | #include "region_data_constants.h"
#include <algorithm>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::RegionDataConstants;
class RegionCodeTest : public testing::TestWithParam<std::string> {
public:
RegionCodeTest(const RegionCodeTest&) = delete;
RegionCodeTest& operator=(const RegionCodeTest&) = delete;
protected:
RegionCodeTest() = default;
};
TEST_P(RegionCodeTest, RegionCodeHasTwoCharacters) {
EXPECT_EQ(2, GetParam().length());
}
INSTANTIATE_TEST_SUITE_P(
AllRegionCodes, RegionCodeTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
testing::AssertionResult HasCurlyBraces(const std::string& data) {
if (data.empty()) {
return testing::AssertionFailure() << "data is empty";
}
if (data[0] != '{') {
return testing::AssertionFailure() << data << " does not start with '{'";
}
if (data[data.length() - 1] != '}') {
return testing::AssertionFailure() << data << " does not end with '}'";
}
return testing::AssertionSuccess();
}
TEST(DefaultRegionDataTest, DefaultRegionHasCurlyBraces) {
EXPECT_TRUE(HasCurlyBraces(RegionDataConstants::GetDefaultRegionData()));
}
class RegionDataTest : public testing::TestWithParam<std::string> {
public:
RegionDataTest(const RegionDataTest&) = delete;
RegionDataTest& operator=(const RegionDataTest&) = delete;
protected:
RegionDataTest() = default;
std::string GetData() const {
return RegionDataConstants::GetRegionData(GetParam());
}
};
TEST_P(RegionDataTest, RegionDataHasCurlyBraces) {
EXPECT_TRUE(HasCurlyBraces(GetData()));
}
INSTANTIATE_TEST_SUITE_P(
AllRegionData, RegionDataTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
TEST(RegionDataConstantsTest, GetMaxLookupKeyDepth) {
EXPECT_EQ(0, RegionDataConstants::GetMaxLookupKeyDepth("NZ"));
EXPECT_EQ(1, RegionDataConstants::GetMaxLookupKeyDepth("KY"));
EXPECT_EQ(2, RegionDataConstants::GetMaxLookupKeyDepth("US"));
EXPECT_EQ(3, RegionDataConstants::GetMaxLookupKeyDepth("CN"));
}
TEST(RegionDataConstantsTest, RegionCodesSorted) {
EXPECT_TRUE(std::is_sorted(RegionDataConstants::GetRegionCodes().begin(),
RegionDataConstants::GetRegionCodes().end()));
}
} | 482 |
#ifndef TENSORFLOW_LITE_NNAPI_NNAPI_IMPLEMENTATION_H_
#define TENSORFLOW_LITE_NNAPI_NNAPI_IMPLEMENTATION_H_
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory>
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
struct NnApi {
bool nnapi_exists;
int32_t android_sdk_version;
int64_t nnapi_runtime_feature_level;
int (*ANeuralNetworksMemory_createFromFd)(size_t size, int protect, int fd,
size_t offset,
ANeuralNetworksMemory** memory);
void (*ANeuralNetworksMemory_free)(ANeuralNetworksMemory* memory);
int (*ANeuralNetworksModel_create)(ANeuralNetworksModel** model);
void (*ANeuralNetworksModel_free)(ANeuralNetworksModel* model);
int (*ANeuralNetworksModel_finish)(ANeuralNetworksModel* model);
int (*ANeuralNetworksModel_addOperand)(
ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);
int (*ANeuralNetworksModel_setOperandValue)(ANeuralNetworksModel* model,
int32_t index, const void* buffer,
size_t length);
int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams)(
ANeuralNetworksModel* model, int32_t index,
const ANeuralNetworksSymmPerChannelQuantParams* channelQuant);
int (*ANeuralNetworksModel_setOperandValueFromMemory)(
ANeuralNetworksModel* model, int32_t index,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksModel_addOperation)(ANeuralNetworksModel* model,
ANeuralNetworksOperationType type,
uint32_t inputCount,
const uint32_t* inputs,
uint32_t outputCount,
const uint32_t* outputs);
int (*ANeuralNetworksModel_identifyInputsAndOutputs)(
ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
uint32_t outputCount, const uint32_t* outputs);
int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16)(
ANeuralNetworksModel* model, bool allow);
int (*ANeuralNetworksCompilation_create)(
ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);
void (*ANeuralNetworksCompilation_free)(
ANeuralNetworksCompilation* compilation);
int (*ANeuralNetworksCompilation_setPreference)(
ANeuralNetworksCompilation* compilation, int32_t preference);
int (*ANeuralNetworksCompilation_finish)(
ANeuralNetworksCompilation* compilation);
int (*ANeuralNetworksExecution_create)(
ANeuralNetworksCompilation* compilation,
ANeuralNetworksExecution** execution);
void (*ANeuralNetworksExecution_free)(ANeuralNetworksExecution* execution);
int (*ANeuralNetworksExecution_setInput)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type, const void* buffer,
size_t length);
int (*ANeuralNetworksExecution_setInputFromMemory)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksExecution_setOutput)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type, void* buffer, size_t length);
int (*ANeuralNetworksExecution_setOutputFromMemory)(
ANeuralNetworksExecution* execution, int32_t index,
const ANeuralNetworksOperandType* type,
const ANeuralNetworksMemory* memory, size_t offset, size_t length);
int (*ANeuralNetworksExecution_startCompute)(
ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);
int (*ANeuralNetworksEvent_wait)(ANeuralNetworksEvent* event);
void (*ANeuralNetworksEvent_free)(ANeuralNetworksEvent* event);
int (*ASharedMemory_create)(const char* name, size_t size);
int (*ANeuralNetworks_getDeviceCount)(uint32_t* numDevices);
int (*ANeuralNetworks_getDevice)(uint32_t devIndex,
ANeuralNetworksDevice** device);
int (*ANeuralNetworksDevice_getName)(const ANeuralNetworksDevice* device,
const char** name);
int (*ANeuralNetworksDevice_getVersion)(const ANeuralNetworksDevice* device,
const char** version);
int (*ANeuralNetworksDevice_getFeatureLevel)(
const ANeuralNetworksDevice* device, int64_t* featureLevel);
int (*ANeuralNetworksDevice_getType)(const ANeuralNetworksDevice* device,
int32_t* type);
int (*ANeuralNetworksModel_getSupportedOperationsForDevices)(
const ANeuralNetworksModel* model,
const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
bool* supportedOps);
int (*ANeuralNetworksCompilation_createForDevices)(
ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices,
uint32_t numDevices, ANeuralNetworksCompilat | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <gtest/gtest.h>
namespace {
TEST(NnapiLibTest, NnApiImplementation) {
const NnApi* nnapi = NnApiImplementation();
EXPECT_NE(nnapi, nullptr);
#ifdef __ANDROID__
EXPECT_GT(nnapi->android_sdk_version, 0);
if (nnapi.android_sdk_version < 27) {
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
} else {
EXPECT_TRUE(nnapi->nnapi_exists);
EXPECT_NE(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
if (nnapi->android_sdk_version >= 28) {
EXPECT_NE(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
} else {
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
}
EXPECT_NE(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_NE(nnapi->ASharedMemory_create, nullptr);
}
#else
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->android_sdk_version, 0);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDeviceCount, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDevice, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getName, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getVersion, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getFeatureLevel, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_createForDevices, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setCaching, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_compute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandRank, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_burstCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setMeasureTiming, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getDuration, nullptr);
#endif
}
} | 826 |
#ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TF_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TF_OP_H_
#include "tensorflow/lite/kernels/shim/test_op/simple_op.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
class SimpleOpKernel : public TfOpKernel<SimpleOp> {
public:
using TfOpKernel::TfOpKernel;
};
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/simple_tf_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
namespace tflite {
namespace shim {
REGISTER_TF_OP_SHIM(SimpleOpKernel);
REGISTER_KERNEL_BUILDER(
Name(SimpleOpKernel::OpName()).Device(::tensorflow::DEVICE_CPU),
SimpleOpKernel);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_INT64;
using ::tensorflow::DT_STRING;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::tstring;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class SimpleOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(SimpleOpTfTest, Output1Size_5_N_2) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 5)
.Attr("output2_suffix", "foo")
.Attr("N", 2)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(2, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abc"});
AddInputFromArray<int64_t>(TensorShape({}), {123});
AddInputFromArray<int64_t>(TensorShape({2}), {456, 789});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(
*GetOutput(1), AsTensor<float>({0, 0.5, 1., 1.5, 2.}, {5}));
ExpectTensorEqual<tstring>(
*GetOutput(2), AsTensor<tstring>({"0", "1", "2", "foo"}, {4}));
ExpectTensorEqual<int64_t>(*GetOutput(3),
AsTensor<int64_t>({124}, {}));
ExpectTensorEqual<int64_t>(*GetOutput(4),
AsTensor<int64_t>({457, 790}, {2}));
}
TEST_F(SimpleOpTfTest, Output1Size_3_N_0) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 3)
.Attr("output2_suffix", "foo")
.Attr("N", 0)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(0, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abcde"});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(*GetOutput(1),
AsTensor<float>({0, 0.5, 1.}, {3}));
ExpectTensorEqual<tstring>(
*GetOutput(2),
AsTensor<tstring>({"0", "1", "2", "3", "4", "foo"}, {6}));
}
}
}
} | 934 |
#ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
template <typename AType, typename BType>
class TmplOpKernel : public TfOpKernel<TmplOp, AType, BType> {
public:
using TfOpKernel<TmplOp, AType, BType>::TfOpKernel;
};
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.h"
#include <cstdint>
#include "tensorflow/core/framework/types.h"
namespace tflite {
namespace shim {
using TmplOpKernelInstance = TmplOpKernel<float, int32_t>;
REGISTER_TF_OP_SHIM(TmplOpKernelInstance);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<float>("AType")
.TypeConstraint<int32_t>("BType"),
TmplOpKernel<float, int32_t>);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<int32_t>("AType")
.TypeConstraint<int64_t>("BType"),
TmplOpKernel<int32_t, int64_t>);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_FLOAT;
using ::tensorflow::DT_INT32;
using ::tensorflow::DT_INT64;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class TmplOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(TmplOpTfTest, float_int32) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_FLOAT)
.Attr("BType", DT_INT32)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {10.5});
AddInputFromArray<int32_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0),
AsTensor<float>({30.5}, {}));
}
TEST_F(TmplOpTfTest, int32_int64) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_INT32)
.Attr("BType", DT_INT64)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<int32_t>(TensorShape({}), {10});
AddInputFromArray<int64_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0), AsTensor<float>({30}, {}));
}
}
}
} | 935 |
#ifndef TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_H_
#define TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_H_
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/eager/c_api.h"
#ifdef __cplusplus
extern "C" {
#endif
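// Resets `op_to_reset` in place to the given op and device so a single TFE_Op
// can be reused across calls instead of being reallocated each time. The two
// graph-collection toggles below turn collection of executed graphs on and
// off for a context, which is useful when inspecting what actually ran.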
TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
const char* op_or_function_name,
const char* raw_device_name,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextEnableGraphCollection(TFE_Context* ctx);
TF_CAPI_EXPORT extern void TFE_ContextDisableGraphCollection(TFE_Context* ctx);
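// Monotonic counters. The 0/1/2 suffix is the number of string labels, and
// each distinct label tuple maps to its own TFE_MonitoringCounterCell. A
// minimal usage sketch built only from the declarations below; the metric
// name and description strings are illustrative, not part of the API:
//
//   TF_Status* status = TF_NewStatus();
//   TFE_MonitoringCounter0* counter = TFE_MonitoringNewCounter0(
//       "/example/runs", status, "Number of example runs.");
//   if (TF_GetCode(status) == TF_OK) {
//     TFE_MonitoringCounterCell* cell =
//         TFE_MonitoringGetCellCounter0(counter);
//     TFE_MonitoringCounterCellIncrementBy(cell, 1);
//     int64_t value = TFE_MonitoringCounterCellValue(cell);  // == 1
//     TFE_MonitoringDeleteCounter0(counter);
//   }
//   TF_DeleteStatus(status);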
typedef struct TFE_MonitoringCounterCell TFE_MonitoringCounterCell;
TF_CAPI_EXPORT extern void TFE_MonitoringCounterCellIncrementBy(
TFE_MonitoringCounterCell* cell, int64_t value);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
TFE_MonitoringCounterCell* cell);
typedef struct TFE_MonitoringCounter0 TFE_MonitoringCounter0;
TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
const char* name, TF_Status* status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter0(
TFE_MonitoringCounter0* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
TFE_MonitoringCounter0* counter);
typedef struct TFE_MonitoringCounter1 TFE_MonitoringCounter1;
TF_CAPI_EXPORT extern TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(
const char* name, TF_Status* status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter1(
TFE_MonitoringCounter1* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter1(
TFE_MonitoringCounter1* counter, const char* label1);
typedef struct TFE_MonitoringCounter2 TFE_MonitoringCounter2;
TF_CAPI_EXPORT extern TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(
const char* name, TF_Status* status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter2(
TFE_MonitoringCounter2* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
TFE_MonitoringCounter2* counter, const char* label1, const char* label2);
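// Int gauges: like the counters above but settable to arbitrary int64 values
// rather than only incremented; the 0/1/2-label pattern is the same.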
typedef struct TFE_MonitoringIntGaugeCell TFE_MonitoringIntGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringIntGaugeCellSet(
TFE_MonitoringIntGaugeCell* cell, int64_t value);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringIntGaugeCellValue(
TFE_MonitoringIntGaugeCell* cell);
typedef struct TFE_MonitoringIntGauge0 TFE_MonitoringIntGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge0(
TFE_MonitoringIntGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge0(TFE_MonitoringIntGauge0* gauge);
typedef struct TFE_MonitoringIntGauge1 TFE_MonitoringIntGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge1(
TFE_MonitoringIntGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge1(TFE_MonitoringIntGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringIntGauge2 TFE_MonitoringIntGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge2(
TFE_MonitoringIntGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge2(TFE_MonitoringIntGauge2* gauge,
const char* label1, const char* label2);
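// String gauges. Reads go through a caller-provided TF_Buffer: the expected
// pattern (an assumption based on the TF_Buffer conventions in c_api.h) is to
// create the buffer with TF_NewBuffer(), pass it to
// TFE_MonitoringStringGaugeCellValue, and release it with TF_DeleteBuffer().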
typedef struct TFE_MonitoringStringGaugeCell TFE_MonitoringStringGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringStringGaugeCellSet(
TFE_MonitoringStringGaugeCell* cell, const char* value);
TF_CAPI_EXPORT extern void TFE_MonitoringStringGaugeCellValue(
    TFE_MonitoringStringGaugeCell* cell, TF_Buffer* buf);
typedef struct TFE_MonitoringStringGauge0 TFE_MonitoringStringGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge0(
TFE_MonitoringStringGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge0(TFE_MonitoringStringGauge0* gauge);
typedef struct TFE_MonitoringStringGauge1 TFE_MonitoringStringGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge1(
TFE_MonitoringStringGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge1(TFE_MonitoringStringGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringStringGauge2 TFE_MonitoringStringGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge2(
TFE_MonitoringStringGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge2(TFE_MonitoringStringGauge2* gauge,
const char* label1, const char* label2);
typedef struct TFE_MonitoringStringGauge3 TFE_MonitoringStringGauge3;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge3* TFE_MonitoringNewStringGauge3(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2, const char* label3);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge3(
TFE_MonitoringStringGauge3* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge3(TFE_MonitoringStringGauge3* gauge,
const char* label1, const char* label2,
const char* label3);
typedef struct TFE_MonitoringStringGauge4 TFE_MonitoringStringGauge4;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge4* TFE_MonitoringNewStringGauge4(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2, const char* label3,
const char* label4);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge4(
TFE_MonitoringStringGauge4* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge4(TFE_MonitoringStringGauge4* gauge,
const char* label1, const char* label2,
const char* label3, const char* label4);
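// Bool gauges follow the same 0/1/2-label pattern with a plain bool payload.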
typedef struct TFE_MonitoringBoolGaugeCell TFE_MonitoringBoolGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringBoolGaugeCellSet(
TFE_MonitoringBoolGaugeCell* cell, bool value);
TF_CAPI_EXPORT extern bool TFE_MonitoringBoolGaugeCellValue(
TFE_MonitoringBoolGaugeCell* cell);
typedef struct TFE_MonitoringBoolGauge0 TFE_MonitoringBoolGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge0(
TFE_MonitoringBoolGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge0(TFE_MonitoringBoolGauge0* gauge);
typedef struct TFE_MonitoringBoolGauge1 TFE_MonitoringBoolGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge1(
TFE_MonitoringBoolGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge1(TFE_MonitoringBoolGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringBoolGauge2 TFE_MonitoringBoolGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge2(
TFE_MonitoringBoolGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge2(TFE_MonitoringBoolGauge2* gauge,
const char* label1, const char* label2);
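// Samplers record observed values into histogram buckets. A minimal sketch;
// the metric name and bucket parameters are illustrative, and the explicit
// TFE_MonitoringDeleteBuckets call assumes the sampler does not take
// ownership of the buckets object:
//
//   TF_Status* status = TF_NewStatus();
//   TFE_MonitoringBuckets* buckets = TFE_MonitoringNewExponentialBuckets(
//       /*scale=*/1.0, /*growth_factor=*/2.0, /*bucket_count=*/10);
//   TFE_MonitoringSampler0* sampler = TFE_MonitoringNewSampler0(
//       "/example/latency", buckets, status, "Example latency histogram.");
//   TFE_MonitoringSamplerCellAdd(TFE_MonitoringGetCellSampler0(sampler), 3.5);
//   TFE_MonitoringDeleteSampler0(sampler);
//   TFE_MonitoringDeleteBuckets(buckets);
//   TF_DeleteStatus(status);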
typedef struct TFE_MonitoringSamplerCell TFE_MonitoringSamplerCell;
TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellAdd(
TFE_MonitoringSamplerCell* cell, double value);
TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellValue(
TFE_MonitoringSamplerCell* cell, TF_Buffer* buf);
typedef struct TFE_MonitoringBuckets TFE_MonitoringBuckets;
TF_CAPI_EXPORT extern TFE_MonitoringBuckets*
TFE_MonitoringNewExponentialBuckets(double scale, double growth_factor,
int bucket_count);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBuckets(
TFE_MonitoringBuckets* buckets);
typedef struct TFE_MonitoringSampler0 TFE_MonitoringSampler0;
TF_CAPI_EXPORT extern TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler0(
TFE_MonitoringSampler0* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler0(
TFE_MonitoringSampler0* sampler);
typedef struct TFE_MonitoringSampler1 TFE_MonitoringSampler1;
TF_CAPI_EXPORT extern TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description, const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler1(
TFE_MonitoringSampler1* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler1(
TFE_MonitoringSampler1* sampler, const char* label1);
typedef struct TFE_MonitoringSampler2 TFE_MonitoringSampler2;
TF_CAPI_EXPORT extern TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description, const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler2(
TFE_MonitoringSampler2* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler2(
TFE_MonitoringSampler2* sampler, const char* label1, const char* label2);
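// Context/runtime knobs: TFE_ContextOptionsSetTfrt selects the TFRT-based
// runtime when the context is built, and TFE_GetContextId returns an
// identifier for the underlying eager context (e.g. to check that client and
// worker contexts still match).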
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetTfrt(TFE_ContextOptions*,
bool use_tfrt);
TF_CAPI_EXPORT extern uint64_t TFE_GetContextId(TFE_Context* ctx);
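// Cancellation. A TFE_CancellationManager fans a single StartCancel out to
// every registered callback and to any op it has been attached to via
// TFE_OpSetCancellationManager below. Sketch; the callback and its context
// pointer are illustrative:
//
//   void OnCancel(void* context) { /* e.g. wake up a waiting thread */ }
//
//   TFE_CancellationManager* cm = TFE_NewCancellationManager();
//   TFE_CancellationToken token = TFE_CancellationManagerGetToken(cm);
//   TFE_CancelCallback cb = {&OnCancel, /*context=*/NULL};
//   TFE_CancellationManagerRegisterCallback(cm, token, &cb, "on_cancel");
//   TFE_CancellationManagerStartCancel(cm);  // runs OnCancel
//   bool cancelled = TFE_CancellationManagerIsCancelled(cm);  // true
//   TFE_DeleteCancellationManager(cm);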
typedef struct TFE_CancellationManager TFE_CancellationManager;
typedef int64_t TFE_CancellationToken;
typedef struct TFE_CancelCallback {
void (*callback)(void* context);
void* context;
} TFE_CancelCallback;
TF_CAPI_EXPORT extern TFE_CancellationManager* TFE_NewCancellationManager();
TF_CAPI_EXPORT extern bool TFE_CancellationManagerIsCancelled(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerIsCancelling(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern void TFE_CancellationManagerStartCancel(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern TFE_CancellationToken TFE_CancellationManagerGetToken(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerRegisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token,
const TFE_CancelCallback* c_callback, const char* callback_name);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerDeregisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerTryDeregisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token);
TF_CAPI_EXPORT extern void TFE_DeleteCancellationManager(
TFE_CancellationManager*);
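// Illustrative sketch (not part of the exported API; names below are
// hypothetical): the token-based lifecycle implied by the declarations above.
// A token is minted first, then used to register (and possibly deregister)
// the callback; StartCancel runs every callback still registered.
static void ExampleOnCancel(void* context) {
  *static_cast<bool*>(context) = true;  // record that cancellation fired
}
static void ExampleCancellationFlow() {
  TFE_CancellationManager* mgr = TFE_NewCancellationManager();
  bool fired = false;
  TFE_CancelCallback cb;
  cb.callback = &ExampleOnCancel;
  cb.context = &fired;
  TFE_CancellationToken token = TFE_CancellationManagerGetToken(mgr);
  TFE_CancellationManagerRegisterCallback(mgr, token, &cb, "example_cb");
  TFE_CancellationManagerStartCancel(mgr);  // invokes ExampleOnCancel
  TFE_DeleteCancellationManager(mgr);
}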
TF_CAPI_EXPORT extern void TFE_OpSetCancellationManager(
TFE_Op* op, TFE_CancellationManager* cancellation_manager,
TF_Status* status);
typedef struct TFE_Executor TFE_Executor;
TF_CAPI_EXPORT extern TFE_Executor* TFE_NewExecutor(
bool is_async, bool enable_streaming_enqueue, int in_flight_nodes_limit);
TF_CAPI_EXPORT extern void TFE_DeleteExecutor(TFE_Executor*);
TF_CAPI_EXPORT extern bool TFE_ExecutorIsAsync(TFE_Executor*);
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
TFE_Executor*, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
TFE_Executor*);
TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
TFE_Context*);
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status,
bool clear_existing_contexts);
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, int retries, TF_Status* status,
bool clear_existing_contexts);
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status);
TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
TF_Status*);
TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
TF_Status*);
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
int num_dims, void* data, size_t len,
void (*deallocator)(void* data, size_t len, void* arg),
void* deallocator_arg, TF_Status* status);
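// Contract sketch for the factory above (hedged): `data` must remain valid
// until the runtime calls `deallocator(data, len, deallocator_arg)`, which
// happens once the new handle is destroyed; the aliasing pattern in the tests
// below passes the owning handle as `deallocator_arg` and deletes it from the
// callback.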
TF_CAPI_EXPORT extern void TFE_HostAddressSpace(TFE_Context* ctx,
TF_Buffer* buf);
typedef struct TFE_OpAttrs TFE_OpAttrs;
TF_CAPI_EXPORT extern const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op);
TF_CAPI_EXPORT extern void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs);
TF_CAPI_EXPORT extern void TFE_OpAttrsSerialize(const TFE_OpAttrs* attrs,
TF_Buffer* buf,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_OpSetAttrValueProto(const TFE_Op* op,
const char* attr_name,
const void* proto,
size_t proto_len,
TF_Status* status);
#define TFE_CUSTOM_DEVICE_VERSION 4
typedef struct TFE_CustomDevice {
int version = TFE_CUSTOM_DEVICE_VERSION;
TFE_TensorHandle* (*copy_tensor_to_device)(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status,
void* device_info);
TFE_TensorHandle* (*copy_tensor_from_device)(TFE_Context* context,
TFE_TensorHandle* tensor,
const char* target_device_name,
TF_Status* status,
void* device_info);
void (*execute)(const TFE_Op* op, int* num_outputs,
TFE_TensorHandle** outputs, TF_Status* s, void* device_info);
void (*delete_device)(void* device_info);
TFE_TensorHandle* (*pack)(TFE_Context* context, TFE_TensorHandle** handles,
int num_handles, TF_Status* s,
void* device_info) = nullptr;
bool (*shall_pin_to_this_device)(const TFE_Op* op, TF_Status* s) = nullptr;
} TFE_CustomDevice;
TF_CAPI_EXPORT extern void TFE_RegisterCustomDevice(TFE_Context* ctx,
TFE_CustomDevice device,
const char* device_name,
void* device_info,
TF_Status* status);
TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
const char* device_name);
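// Minimal sketch (hypothetical pass-through skeleton, not a working device):
// every entry receives the opaque `device_info` pointer handed to
// TFE_RegisterCustomDevice. The stubs below only illustrate the signatures;
// a real device would produce new handles and run the op.
static TFE_TensorHandle* ExampleCopyToDevice(TFE_Context* context,
                                             TFE_TensorHandle* tensor,
                                             TF_Status* status,
                                             void* device_info) {
  TF_SetStatus(status, TF_UNIMPLEMENTED, "sketch only");
  return nullptr;
}
static TFE_TensorHandle* ExampleCopyFromDevice(TFE_Context* context,
                                               TFE_TensorHandle* tensor,
                                               const char* target_device_name,
                                               TF_Status* status,
                                               void* device_info) {
  TF_SetStatus(status, TF_UNIMPLEMENTED, "sketch only");
  return nullptr;
}
static void ExampleExecute(const TFE_Op* op, int* num_outputs,
                           TFE_TensorHandle** outputs, TF_Status* s,
                           void* device_info) {
  *num_outputs = 0;
  TF_SetStatus(s, TF_UNIMPLEMENTED, "sketch only");
}
static void ExampleDeleteDevice(void* device_info) {}
static void ExampleInstall(TFE_Context* ctx, TF_Status* status) {
  TFE_CustomDevice device;  // version defaults to TFE_CUSTOM_DEVICE_VERSION
  device.copy_tensor_to_device = &ExampleCopyToDevice;
  device.copy_tensor_from_device = &ExampleCopyFromDevice;
  device.execute = &ExampleExecute;
  device.delete_device = &ExampleDeleteDevice;
  // pack and shall_pin_to_this_device keep their null defaults.
  TFE_RegisterCustomDevice(ctx, device,
                           "/job:localhost/replica:0/task:0/device:CUSTOM:0",
                           /*device_info=*/nullptr, status);
}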
typedef struct TFE_CustomDeviceTensorHandleMethods {
int version = TFE_CUSTOM_DEVICE_VERSION;
int (*num_dims)(void* data, TF_Status* status);
int64_t (*dim)(void* data, int dim_index, TF_Status* status);
void (*deallocator)(void* data);
TF_Buffer* (*summarize)(void* data, TF_Status* status) = nullptr;
} TFE_CustomDeviceTensorHandle;
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
TFE_Context*, const char* device_name, TF_DataType, void* data,
TFE_CustomDeviceTensorHandle methods, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,
const char* function_name,
TF_Buffer* buf,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_Contex | #include "tensorflow/c/eager/c_api_experimental.h"
#include <string.h>
#include <string>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
using tensorflow::string;
namespace tensorflow {
namespace {
static bool HasSubstr(absl::string_view base, absl::string_view substr) {
bool ok = absl::StrContains(base, substr);
EXPECT_TRUE(ok) << base << ", expected substring " << substr;
return ok;
}
TEST(CAPI, MonitoringCounter0) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0("test/counter", status, "description");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/counter",
metrics->point_set_map.at("test/counter")->metric_name);
EXPECT_EQ(
1, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringCounterCellIncrementBy(cell, 5);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 6);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(
6, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringDeleteCounter0(counter);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(metrics->point_set_map.end(),
metrics->point_set_map.find("test/counter"));
}
TEST(CAPI, MonitoringCounterMultiple) {
TF_Status* status = TF_NewStatus();
auto* counter1 = TFE_MonitoringNewCounter1("test/counter1", status,
"description", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellCounter1(counter1, "test");
TFE_MonitoringCounterCellIncrementBy(cell1, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell1), 1);
auto* counter2 = TFE_MonitoringNewCounter2("test/counter2", status,
"description", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell2 = TFE_MonitoringGetCellCounter2(counter2, "foo", "bar");
TFE_MonitoringCounterCellIncrementBy(cell2, 2);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell2), 2);
TFE_MonitoringDeleteCounter1(counter1);
TFE_MonitoringDeleteCounter2(counter2);
}
TEST(CAPI, MonitoringGauge0) {
TF_Status* status = TF_NewStatus();
auto* gauge = TFE_MonitoringNewIntGauge0("test/gauge", status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellIntGauge0(gauge);
TFE_MonitoringIntGaugeCellSet(cell, 1);
EXPECT_EQ(TFE_MonitoringIntGaugeCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/gauge", metrics->point_set_map.at("test/gauge")->metric_name);
EXPECT_EQ(1,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringIntGaugeCellSet(cell, 5);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(5,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringDeleteIntGauge0(gauge);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleGauge) {
TF_Status* status = TF_NewStatus();
auto* gauge1 =
TFE_MonitoringNewBoolGauge1("test/gauge1", status, "test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellBoolGauge1(gauge1, "foo");
TFE_MonitoringBoolGaugeCellSet(cell1, true);
EXPECT_TRUE(TFE_MonitoringBoolGaugeCellValue(cell1));
TFE_MonitoringDeleteBoolGauge1(gauge1);
auto* gauge2 = TFE_MonitoringNewStringGauge2("test/gauge2", status, "test",
"label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellStringGauge2(gauge2, "foo", "bar");
TFE_MonitoringStringGaugeCellSet(cell2, "str");
auto* buf = new TF_Buffer;
TFE_MonitoringStringGaugeCellValue(cell2, buf);
string data(static_cast<const char*>(buf->data), buf->length);
TF_DeleteBuffer(buf);
EXPECT_EQ(data, "str");
TFE_MonitoringDeleteStringGauge2(gauge2);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringSampler0) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler =
TFE_MonitoringNewSampler0("test/sampler", buckets, status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellSampler0(sampler);
TFE_MonitoringSamplerCellAdd(cell, 1.0);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/sampler",
metrics->point_set_map.at("test/sampler")->metric_name);
EXPECT_EQ(1.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringSamplerCellAdd(cell, 5.0);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(6.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringDeleteBuckets(buckets);
TFE_MonitoringDeleteSampler0(sampler);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleSampler) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler1 = TFE_MonitoringNewSampler1("test/sampler1", buckets, status,
"test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellSampler1(sampler1, "foo");
TFE_MonitoringSamplerCellAdd(cell1, 1.0);
TFE_MonitoringSamplerCellAdd(cell1, 2.0);
TF_Buffer* result1 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell1, result1);
tensorflow::HistogramProto histogram1;
EXPECT_TRUE(histogram1.ParseFromString(
{reinterpret_cast<const char*>(result1->data), result1->length}));
EXPECT_EQ(histogram1.sum(), 3.0);
TF_DeleteBuffer(result1);
TFE_MonitoringDeleteSampler1(sampler1);
auto* sampler2 = TFE_MonitoringNewSampler2("test/sampler2", buckets, status,
"test", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellSampler2(sampler2, "foo", "bar");
TFE_MonitoringSamplerCellAdd(cell2, 2.0);
TFE_MonitoringSamplerCellAdd(cell2, 3.0);
TF_Buffer* result2 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell2, result2);
tensorflow::HistogramProto histogram2;
EXPECT_TRUE(histogram2.ParseFromString(
{reinterpret_cast<const char*>(result2->data), result2->length}));
EXPECT_EQ(histogram2.sum(), 5.0);
TF_DeleteBuffer(result2);
TFE_MonitoringDeleteSampler2(sampler2);
TFE_MonitoringDeleteBuckets(buckets);
TF_DeleteStatus(status);
}
TEST(CAPI, CancellationManager) {
TFE_CancellationManager* c_mgr = TFE_NewCancellationManager();
EXPECT_FALSE(TFE_CancellationManagerIsCancelled(c_mgr));
TFE_CancelCallback callback1;
callback1.callback = [](void* context) {
ADD_FAILURE() << "Callback1 should be deregistered.";
};
TFE_CancellationToken token1 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token1, &callback1,
"callback1"));
TFE_CancelCallback callback2;
bool callback2_invoked = false;
callback2.context = &callback2_invoked;
callback2.callback = [](void* context) {
*reinterpret_cast<bool*>(context) = true;
};
TFE_CancellationToken token2 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token2, &callback2,
"callback2"));
TFE_CancellationToken token3 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token3, &callback1,
"callback3"));
EXPECT_TRUE(TFE_CancellationManagerDeregisterCallback(c_mgr, token1));
EXPECT_TRUE(TFE_CancellationManagerTryDeregisterCallback(c_mgr, token3));
TFE_CancellationManagerStartCancel(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerIsCancelled(c_mgr));
EXPECT_TRUE(callback2_invoked);
TFE_DeleteCancellationManager(c_mgr);
}
TEST(CAPI, ExecutorContextDestructionOrder) {
TF_Status* status = TF_NewStatus();
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Executor* executor = TFE_NewExecutor(
false, true,
0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteContext(ctx);
TFE_DeleteExecutor(executor);
}
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Executor* executor = TFE_NewExecutor(
false, true,
0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TF_DeleteStatus(status);
}
TEST(CAPI, Function_ident_CPU) {
TF_Graph* function_graph = TF_NewGraph();
TF_OperationDescription* arg_descr =
TF_NewOperation(function_graph, "Placeholder", "arg");
TF_SetAttrType(arg_descr, "dtype", TF_INT32);
TF_Status* status = TF_NewStatus();
TF_Operation* arg = TF_FinishOperation(arg_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_OperationDescription* id_descr =
TF_NewOperation(function_graph, "Identity", "id");
TF_SetAttrType(id_descr, "T", TF_INT32);
TF_AddInput(id_descr, {arg, 0});
TF_Operation* id = TF_FinishOperation(id_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_Output input{arg, 0};
TF_Output output{id, 0};
TF_Function* fn =
TF_GraphToFunction(function_graph, "ident", 0, 1, &id, 1, &input, 1,
&output, nullptr, nullptr, "test", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteGraph(function_graph);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_ContextAddFunction(ctx, fn, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteFunction(fn);
for (bool async : {false, true, false}) {
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
TFE_Executor* executor = TFE_NewExecutor(
async, true,
0);
TFE_ContextSetExecutorForThread(ctx, executor);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t =
TF_AllocateTensor(TF_INT32, nullptr, 0, 1 * sizeof(tensorflow::int32));
*reinterpret_cast<tensorflow::int32*>(TF_TensorData(t)) = 42;
TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteTensor(t);
TFE_Op* op = TFE_NewOp(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_OpAddInput(op, h, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> result;
result.push_back(nullptr);
int num_retvals = 1;
TFE_Execute(op, result.data(), &num_retvals, status);
TFE_DeleteOp(op);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
ASSERT_EQ(num_retvals, 1);
TF_Tensor* r = TFE_TensorHandleResolve(result[0], status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
EXPECT_EQ(*reinterpret_cast<tensorflow::int32*>(TF_TensorData(r)), 42);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteTensorHandle(h);
TF_DeleteTensor(r);
TFE_DeleteTensorHandle(result[0]);
}
TFE_ContextRemoveFunction(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteStatus(status);
}
void Executor_MatMul_CPU(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
TFE_Executor* executor = TFE_NewExecutor(
async, true,
0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
int num_retvals = 2;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, Executor_MatMul_CPU) { Executor_MatMul_CPU(false); }
TEST(CAPI, Executor_MatMul_CPUAsync) { Executor_MatMul_CPU(true); }
void Deleter(void* data, size_t unused, void* tensor_handle) {
TFE_DeleteTensorHandle(static_cast<TFE_TensorHandle*>(tensor_handle));
}
TEST(CAPI, TensorHandleOnDeviceMemory) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TF_Tensor* m_data = TFE_TensorHandleResolve(m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float* m_float = static_cast<float*>(TF_TensorData(m_data));
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_devices = TF_DeviceListCount(devices);
for (int d = 0; d < num_devices; ++d) {
const char* name = TF_DeviceListName(devices, d, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* copy = TFE_TensorHandleCopyToDevice(m, ctx, name, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
void* data = TFE_TensorHandleDevicePointer(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
size_t size = TFE_TensorHandleDeviceMemorySize(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int64_t dims[] = {2, 2};
TFE_TensorHandle* copy_aliased = TFE_NewTensorHandleFromDeviceMemory(
ctx, name, TF_FLOAT, dims, 2, data, size, &Deleter, copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* on_host =
TFE_TensorHandleCopyToDevice(copy_aliased, ctx, "CPU:0", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* resolved = TFE_TensorHandleResolve(on_host, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const float* resolved_data =
static_cast<const float*>(TF_TensorData(resolved));
EXPECT_EQ(0, memcmp(m_float, resolved_data, 4 * sizeof(float)));
TF_DeleteTensor(resolved);
TFE_DeleteTensorHandle(copy_aliased);
TFE_DeleteTensorHandle(on_host);
}
TF_DeleteDeviceList(devices);
TF_DeleteTensor(m_data);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, TensorHandleNullptr) {
TFE_TensorHandle* h = nullptr;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
const char* device_type = TFE_TensorHandleDeviceType(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_type, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int device_id = TFE_TensorHandleDeviceID(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_id, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
}
TEST(CAPI, TensorHandleDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
device_type = TFE_TensorHandleDeviceType(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "GPU")) << device_type;
device_id = TFE_TensorHandleDeviceID(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_DeleteOp(shape_op);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleDefaults) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* h_default = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_TensorHandle* h_cpu = TFE_TensorHandleCopyToDevice(
h_default, ctx, "/device:CPU:0", status.get());
const char* device_type_cpu = TFE_TensorHandleDeviceType(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type_cpu, "CPU")) << device_type_cpu;
int device_id_cpu = TFE_TensorHandleDeviceID(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id_cpu) << device_id_cpu;
TFE_DeleteTensorHandle(h_default);
TFE_DeleteTensorHandle(h_cpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, CreateLocalContextAsReset) {
tensorflow::ServerDef server_def = GetServerDef("worker", 2);
server_def.mutable_default_session_config()->set_isolate_session_state(false);
ServerFactory* factory;
ASSERT_TRUE(ServerFactory::GetFactory(server_def, &factory).ok());
server_def.set_job_name("worker");
server_def.set_task_index(0);
std::unique_ptr<tensorflow::ServerInterface> w0;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w0).ok());
ASSERT_TRUE(w0->Start().ok());
server_def.set_task_index(1);
std::unique_ptr<tensorflow::ServerInterface> w1;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w1).ok());
ASSERT_TRUE(w1->Start().ok());
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
opts->session_options.options.config.set_isolate_session_state(false);
TFE_ContextOptionsSetDevicePlacementPolicy(opts, TFE_DEVICE_PLACEMENT_SILENT);
TFE_Context* ctx = TFE_NewContext(opts, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_task_index(0);
auto cluster = server_def.mutable_cluster();
auto client_job = cluster->add_job();
client_job->set_name("localhost");
int client_port = tensorflow::testing::PickUnusedPortOrDie();
client_job->mutable_tasks()->insert(
{0, strings::StrCat("localhost:", client_port)});
server_def.set_job_name("localhost");
auto serialized = server_def.SerializeAsString();
TFE_ContextSetServerDef(ctx, 0, serialized.data(), serialized.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_job_name("worker");
server_def.set_task_index(0);
tensorflow::ClusterDef* cluster_def = server_def.mutable_cluster();
tensorflow::JobDef* job_def = cluster_def->mutable_job(0);
int worker_port = tensorflow::testing::PickUnusedPortOrDie();
job_def->mutable_tasks()->at(0) =
tensorflow::strings::StrCat("localhost:", worker_port);
serialized = server_def.SerializeAsString();
TFE_InitializeLocalOnlyContext(ctx, 0, serialized.data(), serialized.size(),
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
w0.release();
w1.release();
}
TEST(CAPI, ShareVariableAcrossContextsAfterUpdateContextWorksWithTimeout) {
tensorflow::ServerDef server_def_0 = GetServerDef(3);
server_def_0.mutable_default_session_config()->set_isolate_session_state(
false);
tensorflow::ServerDef server_def_1 =
ReplaceTaskInServerDef(server_def_0, 0);
string serialized_server_def_0 = server_def_0.SerializeAsString();
string serialized_server_def_1 = server_def_1.SerializeAsString();
server_def_0.set_task_index(1);
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
server_def_0.set_task_index(2);
std::unique_ptr<tensorflow::GrpcServer> worker_server2;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server2)
.ok());
ASSERT_TRUE(worker_server2->Start().ok());
int32_t init_timeout_in_ms = 300000;
TFE_Context* ctx_0 =
CreateContext(serialized_server_def_0,
false, init_timeout_in_ms);
TFE_Context* ctx_1 =
CreateContext(serialized_server_def_1,
false, init_timeout_in_ms);
const char remote_device[] = "/job:localhost/replica:0/task:2/device:CPU:0";
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_0);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_1);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_0, 1.2, remote_device, "var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_0, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
int port = tensorflow::testing::PickUnusedPortOrDie();
ReplaceTaskInServerDef(&server_def_0, 1, "localhost", port);
ReplaceTaskInServerDef(&server_def_1, 1, "localhost", port);
server_def_0.set_task_index(1);
worker_server1.release();
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
{
server_def_0.set_task_index(0);
string serialized_update = server_def_0.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_0, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
server_def_1.set_task_index(0);
string serialized_update = server_def_1.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_1, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
TFE_TensorHandle* var_handle =
CreateVarHandle(ctx_1, remote_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(ctx_1, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteTensorHandle(handle_0);
TFE_DeleteContext(ctx_0);
TFE_DeleteContext(ctx_1);
worker_server1.release();
worker_server2.release();
}
}
} | 952 |
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_FLAGS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_FLAGS_H_
#include "absl/flags/declare.h"
ABSL_DECLARE_FLAG(bool, next_pluggable_device_use_c_api);
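// Usage sketch (hedged; reading the flag requires "absl/flags/flag.h" in the
// caller's translation unit):
//   bool use_c_api = absl::GetFlag(FLAGS_next_pluggable_device_use_c_api);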
#endif
#include "tensorflow/core/common_runtime/next_pluggable_device/flags.h"
#include "absl/flags/flag.h"
ABSL_FLAG(bool, next_pluggable_device_use_c_api,
DEFAULT_TF_NEXT_PLUGGABLE_DEVICE_USE_C_API,
"Uses next pluggable device c API."); | #include "tensorflow/core/config/flags.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TFFlags, ReadFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
EXPECT_FALSE(flags::Global().test_only_experiment_2.value());
}
TEST(TFFlags, ResetFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
flags::Global().test_only_experiment_1.reset(false);
EXPECT_FALSE(flags::Global().test_only_experiment_1.value());
}
}
} | 1,082 |
#ifndef TENSORFLOW_CORE_KERNELS_SENDRECV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_SENDRECV_OPS_H_
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
class SendOp : public OpKernel {
public:
explicit SendOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
string TraceString(const OpKernelContext& ctx, bool verbose) const override;
private:
string key_prefix_;
Rendezvous::ParsedKey parsed_key_;
bool hostmem_sendrecv_;
SendOp(const SendOp&) = delete;
void operator=(const SendOp&) = delete;
};
class RecvOp : public AsyncOpKernel {
public:
explicit RecvOp(OpKernelConstruction* ctx);
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
string TraceString(const OpKernelContext& ctx, bool verbose) const override;
private:
string key_prefix_;
Rendezvous::ParsedKey parsed_key_;
bool hostmem_sendrecv_;
RecvOp(const RecvOp&) = delete;
void operator=(const RecvOp&) = delete;
};
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
REGISTER_OP("_Send")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Sends the named tensor from send_device to recv_device.
tensor: The tensor to send.
tensor_name: The name of the tensor to send.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("Send")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("_Recv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Receives the named tensor from send_device on recv_device.
tensor: The tensor to receive.
tensor_name: The name of the tensor to receive.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("Recv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("_HostSend")
.Input("tensor: T")
.Attr("T: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Sends the named tensor from send_device to recv_device.
_HostSend requires its input on host memory whereas _Send requires its
input on device memory.
tensor: The tensor to send.
tensor_name: The name of the tensor to send.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
REGISTER_OP("_HostRecv")
.Output("tensor: tensor_type")
.Attr("tensor_type: type")
.Attr("tensor_name: string")
.Attr("send_device: string")
.Attr("send_device_incarnation: int")
.Attr("recv_device: string")
.Attr("client_terminated: bool = false")
.SetIsStateful()
.SetIsDistributedCommunication()
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
Receives the named tensor from send_device on recv_device.
_HostRecv produces its output on host memory whereas _Recv produces its
output on device memory.
tensor: The tensor to receive.
tensor_name: The name of the tensor to receive.
send_device: The name of the device sending the tensor.
send_device_incarnation: The current incarnation of send_device.
recv_device: The name of the device receiving the tensor.
client_terminated: If set to true, this indicates that the node was added
to the graph as a result of a client-side feed or fetch of Tensor data,
in which case the corresponding send or recv is expected to be managed
locally by the caller.
)doc");
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class DummyRendezvous : public Rendezvous {
Status Send(const ParsedKey& key, const Args& args, const Tensor& val,
const bool is_dead) override {
return absl::OkStatus();
}
void RecvAsync(const ParsedKey& key, const Args& args,
DoneCallback done) override {
static Tensor* t = new Tensor(DT_FLOAT, TensorShape({0}));
done(absl::OkStatus(), args, args, *t, false);
}
void StartAbort(const Status& status) override {}
};
static Graph* Send() {
Graph* g = new Graph(OpRegistry::Global());
Tensor in0(DT_FLOAT, TensorShape({0}));
test::graph::Send(g, test::graph::Constant(g, in0), "T", "/cpu:0", 1,
"/cpu:0");
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
static Graph* Recv() {
Graph* g = new Graph(OpRegistry::Global());
test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0");
return g;
}
void BM_Send(::testing::benchmark::State& state) {
test::Benchmark("cpu", Send(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Send)->UseRealTime();
void BM_Recv(::testing::benchmark::State& state) {
test::Benchmark("cpu", Recv(), nullptr, nullptr, new DummyRendezvous, "",
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_Recv)->UseRealTime();
}
} | 1,134 |
#ifndef TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#define TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace tfrt_stub {
class IfrtCallOp : public tensorflow::OpKernel {
public:
explicit IfrtCallOp(tensorflow::OpKernelConstruction* ctx);
IfrtCallOp(const IfrtCallOp& other) = delete;
IfrtCallOp& operator=(const IfrtCallOp& other) = delete;
void Compute(tensorflow::OpKernelContext* ctx) override;
private:
int64_t program_id_;
std::vector<std::string> variable_names_;
std::vector<int> variable_arg_indices_;
absl::once_flag init_once_;
tensorflow::ifrt_serving::IfrtServingExecutable* executable_;
};
}
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace tfrt_stub {
REGISTER_OP("IfrtCall")
.Input("args: Tin")
.Output("results: Tout")
.Attr("Tin: list(type) >= 0")
.Attr("Tout: list(type) >= 0")
.Attr("program_id: int")
.Attr("variable_arg_indices: list(int)")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
Calls an IFRT program identified by the given program id.
This op looks up a `ServingExecutable` from `ServingExecutableRegistry` using
the program id, calls the executable with the op's inputs as arguments, and
returns its results as the op's outputs.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel; instead, they should rely on Ifrt Serving's mechanism that
automatically inserts this op via graph rewrite.
program_id: int64 id that can be used to look up compiled programs from
`ServingExecutableRegistry`.
variable_arg_indices: must be in sorted ascending order. The argument at position
variable_arg_indices[k] in the tpu program is already loaded as an ifrt array, and
the input `args[variable_arg_indices[k]]` is the key used to look up this loaded array.
)");
REGISTER_OP("IfrtLoadVariable")
.Input("variable: Tin")
.Output("array_key: Tout")
.Output("tensor: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.Attr("used_by_host: bool")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
This op loads a restored variable tensor as a tensor future. It is a replacement for `tf.ReadVariableOp`.
This op returns a scalar string tensor containing the restored variable name, which
is composed from the `container_name` and `shared_name` of a `var_handle` and can be
used as a key within the runtime, as well as a future for the tensor.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel; instead, they should rely on Ifrt Serving's mechanism that
automatically inserts this op via graph rewrite.
variable: the variable handle of the variable tensor to be loaded.
array_key: the key used by the `IfrtCall` op to look up the loaded array.
tensor: the future of the loaded tensor. The future contains a valid tensor if `used_by_host` is true.
used_by_host: a boolean indicating whether the variable is used by the host op
or exclusively by the TPU.
)");
}
} | #include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using tensorflow::ifrt_serving::ServingExecutableRegistry;
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper;
using tensorflow::test::AsTensor;
using tensorflow::test::TensorEq;
using ::testing::Return;
class IfrtCallOpTest : public OpsTestBase {
protected:
Status Init(int64_t program_id, int num_inputs, DataType input_type,
const std::vector<int>& variable_arg_indices,
const std::vector<DataType>& output_type_list) {
TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall")
.Input(FakeInput(num_inputs, input_type))
.Attr("program_id", program_id)
.Attr("variable_arg_indices", variable_arg_indices)
.Attr("Tout", output_type_list)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IfrtCallOpTest, Basic) {
int64_t program_id = 123;
TF_ASSERT_OK(Init(
program_id,
2,
DT_INT32,
{},
{DT_INT32}));
tsl::test_util::MockServingDeviceSelector selector;
IfrtServingExecutableTestHelper helper(&selector);
EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
TF_ASSERT_OK_AND_ASSIGN(
ServingExecutableRegistry::Handle handle,
ServingExecutableRegistry::Register(program_id, std::move(executable)));
auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); });
AddInputFromArray<int32_t>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<int32_t>(TensorShape({3, 1}), {1, 2, 3});
for (int i = 0; i < helper.num_cores() + 1; ++i) {
TF_ASSERT_OK(RunOpKernel());
}
Tensor expected_out = AsTensor<int32_t>({14}, TensorShape({1, 1}));
EXPECT_THAT(*GetOutput(0), TensorEq(expected_out));
}
}
}
} | 1,329 |
#ifndef XLA_PYTHON_IFRT_VALUE_H_
#define XLA_PYTHON_IFRT_VALUE_H_
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/concurrency/ref_count.h"
namespace xla {
namespace ifrt {
class Client;
class Value : public tsl::ReferenceCounted<Value>,
public llvm::RTTIExtends<Value, llvm::RTTIRoot> {
public:
Value() = default;
Value(const Value&) = delete;
Value(Value&&) = delete;
Value& operator=(const Value&) = delete;
Value& operator=(Value&&) = delete;
virtual Client* client() const = 0;
virtual Future<> GetReadyFuture() const = 0;
virtual Future<> Delete() = 0;
virtual bool IsDeleted() const = 0;
virtual std::string DebugString() const = 0;
static char ID;
};
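// Illustrative sketch (hypothetical subclass, not part of IFRT): concrete
// values derive through llvm::RTTIExtends so llvm::isa/dyn_cast work across
// the hierarchy, each leaf declaring its own RTTI anchor. Hedged: the exact
// Future<> construction is elided because it depends on the Future API.
//
//   class ExampleValue final : public llvm::RTTIExtends<ExampleValue, Value> {
//    public:
//     Client* client() const override { return nullptr; }  // sketch only
//     Future<> GetReadyFuture() const override;
//     Future<> Delete() override;
//     bool IsDeleted() const override { return deleted_; }
//     std::string DebugString() const override { return "ExampleValue"; }
//     static char ID;  // required anchor for LLVM-style RTTI
//    private:
//     bool deleted_ = false;
//   };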
}
}
#endif
#include "xla/python/ifrt/value.h"
namespace xla {
namespace ifrt {
char Value::ID = 0;
}
} | #include "tensorflow/cc/experimental/libtf/value.h"
#include <cstdint>
#include "tensorflow/cc/experimental/libtf/value_iostream.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
namespace impl {
TEST(ValueTest, TestBasic) {
TaggedValue valuef(3.f);
TaggedValue valuei(int64_t(3));
TaggedValue list = TaggedValue::List();
TaggedValue tuple = TaggedValue::Tuple();
tuple.tuple().push_back(TaggedValue(int64_t(310)));
list.list().push_back(valuei);
list.list().push_back(valuef);
list.list().push_back(tuple);
std::stringstream stream;
stream << list;
ASSERT_EQ(stream.str(), "[3, 3, (310, ), ]");
}
TEST(ValueTest, TestString) {
TaggedValue value1a("string1");
std::string s = "string";
s += "1";
TaggedValue value1b(s.c_str());
ASSERT_EQ(value1b.s(), value1a.s());
TaggedValue value2("string2");
ASSERT_NE(value1a.s(), value2.s());
ASSERT_STREQ(value1a.s(), "string1");
ASSERT_STREQ(value2.s(), "string2");
}
TEST(Test1, TestDict) {
TaggedValue s1("test1");
TaggedValue s2("test2");
TaggedValue d = TaggedValue::Dict();
d.dict()[s2] = TaggedValue(6.f);
std::stringstream stream;
stream << d;
ASSERT_EQ(stream.str(), "{test2: 6, }");
}
namespace {
TaggedValue add(TaggedValue args, TaggedValue kwargs) {
if (args.type() == TaggedValue::TUPLE) {
return TaggedValue(args.tuple()[0].f32() + args.tuple()[1].f32());
}
return TaggedValue::None();
}
}
TEST(Test1, TestFunctionCall) {
TaggedValue f32 = TaggedValue(add);
TaggedValue args = TaggedValue::Tuple();
args.tuple().emplace_back(TaggedValue(1.f));
args.tuple().emplace_back(TaggedValue(2.f));
TaggedValue c = f32.func()(args, TaggedValue::None()).value();
ASSERT_EQ(c, TaggedValue(3.f));
}
namespace {
int alloc_count = 0;
class Cool {
public:
Cool() { alloc_count++; }
~Cool() { alloc_count--; }
};
}
TEST(Test1, TestCapsule) {
TaggedValue test_moved, test_copy;
ASSERT_EQ(alloc_count, 0);
void* ptr_value = new Cool();
{
TaggedValue capsule =
TaggedValue::Capsule(static_cast<void*>(ptr_value),
[](void* x) { delete static_cast<Cool*>(x); });
ASSERT_EQ(alloc_count, 1);
ASSERT_EQ(capsule.capsule(), ptr_value);
test_moved = std::move(capsule);
ASSERT_EQ(capsule.type(), TaggedValue::NONE);
test_copy = test_moved;
ASSERT_EQ(test_moved.capsule(), ptr_value);
ASSERT_EQ(test_copy.capsule(), ptr_value);
}
ASSERT_EQ(alloc_count, 1);
test_moved = TaggedValue::None();
ASSERT_EQ(alloc_count, 1);
test_copy = TaggedValue(3.f);
ASSERT_EQ(alloc_count, 0);
}
}
}
} | 1,823 |
#ifndef XLA_SERVICE_CPU_CPU_RUNTIME_H_
#define XLA_SERVICE_CPU_CPU_RUNTIME_H_
#include "xla/executable_run_options.h"
#include "xla/service/cpu/xfeed_manager.h"
namespace xla {
namespace cpu {
namespace runtime {
extern const char* const kEigenMatMulF16SymbolName;
extern const char* const kEigenMatMulF32SymbolName;
extern const char* const kEigenMatMulF64SymbolName;
extern const char* const kEigenMatMulC64SymbolName;
extern const char* const kEigenMatMulC128SymbolName;
extern const char* const kEigenMatMulS32SymbolName;
extern const char* const kEigenBatchMatMulF32SymbolName;
extern const char* const kMKLConv2DF32SymbolName;
extern const char* const kACLConv2DF32SymbolName;
extern const char* const kACLMatMulF32SymbolName;
extern const char* const kACLBatchMatMulF32SymbolName;
extern const char* const kEigenConv2DF16SymbolName;
extern const char* const kEigenConv2DF32SymbolName;
extern const char* const kEigenConv3DF16SymbolName;
extern const char* const kEigenConv3DF32SymbolName;
extern const char* const kDuccFftSymbolName;
extern const char* const kDuccSingleThreadedFftSymbolName;
extern const char* const kEigenSingleThreadedMatMulF16SymbolName;
extern const char* const kEigenSingleThreadedMatMulF32SymbolName;
extern const char* const kEigenSingleThreadedMatMulF64SymbolName;
extern const char* const kEigenSingleThreadedMatMulC64SymbolName;
extern const char* const kEigenSingleThreadedMatMulC128SymbolName;
extern const char* const kEigenSingleThreadedMatMulS32SymbolName;
extern const char* const kEigenSingleThreadedConv2DF16SymbolName;
extern const char* const kEigenSingleThreadedConv2DF32SymbolName;
extern const char* const kEigenSingleThreadedConv3DF16SymbolName;
extern const char* const kEigenSingleThreadedConv3DF32SymbolName;
extern const char* const kAcquireInfeedBufferForDequeueSymbolName;
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName;
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName;
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName;
extern const char* const kParallelForkJoinSymbolName;
extern const char* const kPrintfToStderrSymbolName;
extern const char* const kStatusIsSuccessSymbolName;
extern const char* const kKeyValueSortSymbolName;
extern const char* const kTopKF32SymbolName;
extern const char* const kAllReduceSymbolName;
extern const char* const kCollectivePermuteSymbolName;
extern const char* const kPartitionIdSymbolName;
extern const char* const kReplicaIdSymbolName;
extern const char* const kTracingStartSymbolName;
extern const char* const kTracingEndSymbolName;
extern const char* const kAllToAllSymbolName;
extern const char* const kAllGatherSymbolName;
extern const char* const kReduceScatterSymbolName;
extern const char* const kOneDnnMatMulSymbolName;
extern const char* const kOneDnnSoftmaxSymbolName;
extern const char* const kOneDnnLayerNormSymbolName;
extern const char* const kOneDnnConvolutionSymbolName;
extern const char* const kOneDnnMatMulReorderSymbolName;
extern const char* const kHandleFfiCallSymbolName;
extern const char* const kXlaCpuRuntimeSymbolNamePrefix;
XfeedManager* GetXfeedManager(int device_ordinal);
}
}
}
extern "C" {
extern int __xla_cpu_runtime_PrintfToStderr(const char* format, ...);
extern int64_t __xla_cpu_runtime_TracingStart(
const void* run_options_ptr,
const char* name, const char* hlo_module, int64_t program_id);
extern void __xla_cpu_runtime_TracingEnd(
const void* run_options_ptr, int64_t id);
extern void* __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape, int32_t shape_length);
extern void __xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length);
extern void* __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length);
extern void __xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length);
extern void __xla_cpu_runtime_AllReduce(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr, int32_t shape_length,
int32_t num_buffers, void** input_buffers, void** output_buffers);
extern void __xla_cpu_runtime_CollectivePermute(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, int32_t byte_size, void* input_buffer, void* output_buffer,
const void* source_target_pairs, int32_t source_target_pairs_size);
extern void __xla_cpu_runtime_AllToAll(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers, int64_t buffer_size,
void** source_buffers, void** destination_buffers);
extern void __xla_cpu_runtime_AllGather(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
const void* replica_groups_str, int32_t replica_groups_str_size,
int64_t buffer_size, void* source_buffer, void* destination_buffer);
void __xla_cpu_runtime_ReduceScatter(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type, int64_t chunk_elems,
void* input_buffer, void* output_buffer);
extern void __xla_cpu_runtime_PartitionId(
const xla::ExecutableRunOptions* run_options, void* output_buffer);
extern void __xla_cpu_runtime_ReplicaId(
const xla::ExecutableRunOptions* run_options, void* output_buffer);
}
#endif
#include "xla/service/cpu/cpu_runtime.h"
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/layout_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/in_process_collectives.h"
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace cpu {
namespace runtime {
XfeedManager* GetXfeedManager(int device_ordinal) {
static auto* managers = new absl::flat_hash_map<int, XfeedManager*>();
static absl::Mutex* mutex = new absl::Mutex();
absl::MutexLock lock(mutex);
auto it = managers->find(device_ordinal);
if (it == managers->end()) {
it = managers->emplace(device_ordinal, new XfeedManager()).first;
}
return it->second;
}
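// Editorial sketch (not part of the runtime): the manager map above is
// created on first use and intentionally leaked so it outlives all streams.
// Lookups for the same device ordinal always return the same instance:
//
//   XfeedManager* a = GetXfeedManager(/*device_ordinal=*/0);
//   XfeedManager* b = GetXfeedManager(/*device_ordinal=*/0);
//   CHECK_EQ(a, b);  // one XfeedManager per device, shared across calls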
extern const char* const kEigenMatMulF16SymbolName =
"__xla_cpu_runtime_EigenMatMulF16";
extern const char* const kEigenMatMulF32SymbolName =
"__xla_cpu_runtime_EigenMatMulF32";
extern const char* const kEigenMatMulF64SymbolName =
"__xla_cpu_runtime_EigenMatMulF64";
extern const char* const kEigenMatMulC64SymbolName =
"__xla_cpu_runtime_EigenMatMulC64";
extern const char* const kEigenMatMulC128SymbolName =
"__xla_cpu_runtime_EigenMatMulC128";
extern const char* const kEigenMatMulS32SymbolName =
"__xla_cpu_runtime_EigenMatMulS32";
extern const char* const kEigenBatchMatMulF32SymbolName =
"__xla_cpu_runtime_EigenBatchMatMulF32";
extern const char* const kMKLConv2DF32SymbolName =
"__xla_cpu_runtime_MKLConv2DF32";
extern const char* const kACLConv2DF32SymbolName =
"__xla_cpu_runtime_ACLConv2DF32";
extern const char* const kACLMatMulF32SymbolName =
"__xla_cpu_runtime_ACLMatMulF32";
extern const char* const kACLBatchMatMulF32SymbolName =
"__xla_cpu_runtime_ACLBatchMatMulF32";
extern const char* const kEigenConv2DF16SymbolName =
"__xla_cpu_runtime_EigenConv2DF16";
extern const char* const kEigenConv2DF32SymbolName =
"__xla_cpu_runtime_EigenConv2DF32";
extern const char* const kEigenConv3DF16SymbolName =
"__xla_cpu_runtime_EigenConv3DF16";
extern const char* const kEigenConv3DF32SymbolName =
"__xla_cpu_runtime_EigenConv3DF32";
extern const char* const kDuccFftSymbolName = "__xla_cpu_runtime_DuccFft";
extern const char* const kDuccSingleThreadedFftSymbolName =
"__xla_cpu_runtime_DuccSingleThreadedFft";
extern const char* const kEigenSingleThreadedMatMulF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF16";
extern const char* const kEigenSingleThreadedMatMulF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF32";
extern const char* const kEigenSingleThreadedMatMulF64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulF64";
extern const char* const kEigenSingleThreadedMatMulC64SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC64";
extern const char* const kEigenSingleThreadedMatMulC128SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulC128";
extern const char* const kEigenSingleThreadedMatMulS32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedMatMulS32";
extern const char* const kEigenSingleThreadedConv2DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF16";
extern const char* const kEigenSingleThreadedConv2DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv2DF32";
extern const char* const kEigenSingleThreadedConv3DF16SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF16";
extern const char* const kEigenSingleThreadedConv3DF32SymbolName =
"__xla_cpu_runtime_EigenSingleThreadedConv3DF32";
extern const char* const kAcquireInfeedBufferForDequeueSymbolName =
"__xla_cpu_runtime_AcquireInfeedBufferForDequeue";
extern const char* const kReleaseInfeedBufferAfterDequeueSymbolName =
"__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue";
extern const char* const kAcquireOutfeedBufferForPopulationSymbolName =
"__xla_cpu_runtime_AcquireOutfeedBufferForPopulation";
extern const char* const kReleaseOutfeedBufferAfterPopulationSymbolName =
"__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation";
extern const char* const kParallelForkJoinSymbolName =
"__xla_cpu_runtime_ParallelForkJoin";
extern const char* const kPrintfToStderrSymbolName =
"__xla_cpu_runtime_PrintfToStderr";
extern const char* const kStatusIsSuccessSymbolName =
"__xla_cpu_runtime_StatusIsSuccess";
extern const char* const kKeyValueSortSymbolName =
"__xla_cpu_runtime_KeyValueSort";
extern const char* const kTopKF32SymbolName = "__xla_cpu_runtime_TopKF32";
extern const char* const kTracingStartSymbolName =
"__xla_cpu_runtime_TracingStart";
extern const char* const kTracingEndSymbolName = "__xla_cpu_runtime_TracingEnd";
extern const char* const kXlaCpuRuntimeSymbolNamePrefix = "__xla_cpu_runtime_";
extern const char* const kAllReduceSymbolName = "__xla_cpu_runtime_AllReduce";
extern const char* const kAllGatherSymbolName = "__xla_cpu_runtime_AllGather";
extern const char* const kReduceScatterSymbolName =
"__xla_cpu_runtime_ReduceScatter";
extern const char* const kAllToAllSymbolName = "__xla_cpu_runtime_AllToAll";
extern const char* const kCollectivePermuteSymbolName =
"__xla_cpu_runtime_CollectivePermute";
extern const char* const kPartitionIdSymbolName =
"__xla_cpu_runtime_PartitionId";
extern const char* const kReplicaIdSymbolName = "__xla_cpu_runtime_ReplicaId";
extern const char* const kOneDnnMatMulSymbolName =
"__xla_cpu_runtime_OneDnnMatMul";
extern const char* const kOneDnnSoftmaxSymbolName =
"__xla_cpu_runtime_OneDnnSoftmax";
extern const char* const kOneDnnLayerNormSymbolName =
"__xla_cpu_runtime_OneDnnLayerNorm";
extern const char* const kOneDnnConvolutionSymbolName =
"__xla_cpu_runtime_OneDnnConvolution";
extern const char* const kOneDnnMatMulReorderSymbolName =
"__xla_cpu_runtime_OneDnnMatMulReorder";
extern const char* const kHandleFfiCallSymbolName =
"__xla_cpu_runtime_HandleFfiCall";
namespace {
absl::StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
int32_t size_bytes) {
ShapeProto shape_proto;
if (!shape_proto.ParseFromArray(shape_ptr, size_bytes)) {
return tsl::errors::Internal("Failed parsing the shape proto");
}
Shape shape(shape_proto);
auto status = ShapeUtil::ValidateShape(shape);
if (!status.ok()) {
return status;
}
return std::move(shape);
}
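// Editorial sketch (host-side encoder, assumed for illustration): the
// "self-describing shape" is a serialized xla::ShapeProto, so producing the
// bytes this decoder consumes looks like:
//
//   Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
//   std::string bytes = shape.ToProto().SerializeAsString();
//   // bytes.data() / bytes.size() correspond to shape_ptr / size_bytes.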
std::string ShapeString(const void* shape_ptr, int32_t shape_length) {
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
if (shape.ok()) {
return ShapeUtil::HumanStringWithLayout(shape.value());
}
return "<invalid shape>";
}
int GetDeviceOrdinal(const ExecutableRunOptions* run_options) {
if (!run_options) {
return 0;
} else if (run_options->device_ordinal() != -1) {
return run_options->device_ordinal();
}
return run_options->stream()->parent()->device_ordinal();
}
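// Editorial note: the fallback order is (1) no run options -> ordinal 0,
// (2) an explicitly set ordinal, (3) the ordinal of the stream's executor.
// For example (illustrative):
//
//   ExecutableRunOptions opts;
//   opts.set_device_ordinal(3);
//   GetDeviceOrdinal(&opts);  // -> 3; an attached stream is not consulted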
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireInfeedBufferForDequeueImpl(const ExecutableRunOptions* run_options,
int32_t buffer_length,
const void* shape,
int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireInfeedBufferForDequeue: "
<< ShapeString(shape, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->infeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program infeed request buffer size " << buffer_length
<< " did not match the runtime's infed buffer length " << buffer->length()
<< "; program reports desired shape: "
<< ShapeString(shape, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseInfeedBufferAfterDequeueImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseInfeedBufferAfterDeque: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->infeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void* AcquireOutfeedBufferForPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "AcquireOutfeedBufferForPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
XfeedBuffer* buffer = xfeed->outfeed()->BlockingDequeueBuffer();
CHECK_EQ(buffer->length(), buffer_length)
<< "XLA program outfeed request buffer size " << buffer_length
<< " did not match the runtime's outfeed buffer length "
<< buffer->length() << "; program reports outfed shape: "
<< ShapeString(shape_ptr, shape_length);
return buffer->data();
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReleaseOutfeedBufferAfterPopulationImpl(
const ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
int device_ordinal = GetDeviceOrdinal(run_options);
VLOG(2) << "ReleaseOutfeedBufferAfterPopulation: "
<< ShapeString(shape_ptr, shape_length) << " on stream executor "
<< device_ordinal;
XfeedManager* xfeed = GetXfeedManager(device_ordinal);
absl::StatusOr<Shape> shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length);
xfeed->outfeed()->ReleaseCurrentBuffer(buffer_length, buffer_ptr,
std::move(shape));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReplicaIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
int32_t replica_id = run_options->device_assignment()
->ReplicaIdForDevice(GlobalDeviceId(device_ordinal))
.value();
  std::memcpy(output_buffer, &replica_id, sizeof(replica_id));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void PartitionIdImpl(const ExecutableRunOptions* run_options,
void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
const DeviceAssignment::LogicalID logical_id =
run_options->device_assignment()
->LogicalIdForDevice(GlobalDeviceId(device_ordinal))
.value();
  std::memcpy(output_buffer, &logical_id.computation_id,
              sizeof(logical_id.computation_id));
}
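// Editorial note: a DeviceAssignment logical ID is a (replica_id,
// computation_id) pair; ReplicaIdImpl writes the first half and
// PartitionIdImpl the second. E.g. with 2 replicas x 2 partitions, global
// device 3 typically maps to logical (replica=1, partition=1), subject to
// the actual assignment in use.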
RendezvousKey GetRendezvousKey(const ExecutableRunOptions* run_options,
GlobalDeviceId device,
std::vector<ReplicaGroup> group,
int32_t channel_id_present,
std::optional<bool> use_global_device_ids,
int64_t op_id) {
const DeviceAssignment& device_assignment = *run_options->device_assignment();
RendezvousKey::CollectiveOpKind op_kind = channel_id_present
? RendezvousKey::kCrossModule
: RendezvousKey::kCrossReplica;
std::vector<GlobalDeviceId> participating_devices =
GetParticipatingDevices(GlobalDeviceId(device), device_assignment, group,
GetCollectiveOpGroupMode(channel_id_present != 0,
use_global_device_ids)
.value())
.value();
int num_local_participants = participating_devices.size();
return RendezvousKey{run_options->run_id(), std::move(participating_devices),
num_local_participants, op_kind, op_id};
}
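// Editorial sketch: a channel id on the collective selects cross-module
// (cross-partition) rendezvous semantics, otherwise cross-replica:
//
//   channel_id_present != 0  -> RendezvousKey::kCrossModule
//   channel_id_present == 0  -> RendezvousKey::kCrossReplica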
CollectivesInterface* GetInProcessCollectivesImpl() {
static InProcessCollectives* c = new InProcessCollectives();
return c;
}
CollectivesInterface* GetCollectivesImpl(
const ExecutableRunOptions* run_options) {
if (run_options->cpu_executable_run_options() &&
run_options->cpu_executable_run_options()->collectives()) {
return run_options->cpu_executable_run_options()->collectives();
}
return GetInProcessCollectivesImpl();
}
absl::Duration DefaultCollectiveTimeout() { return absl::Minutes(30); }
absl::StatusOr<int> RankInGlobalDevices(
absl::Span<GlobalDeviceId const> devices, GlobalDeviceId device) {
auto it = absl::c_find(devices, device);
if (it == devices.end()) {
return InvalidArgument(
"Device %d not present in global devices %s.", device.value(),
absl::StrJoin(devices, ", ", [](std::string* out, GlobalDeviceId id) {
absl::StrAppend(out, id.value());
}));
}
return std::distance(devices.begin(), it);
}
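// Illustrative example: with devices = {GlobalDeviceId(4), GlobalDeviceId(7)},
// RankInGlobalDevices(devices, GlobalDeviceId(7)) yields 1, while a device
// missing from the list produces an InvalidArgument status instead of a rank.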
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllToAllImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int64_t op_id,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers,
int64_t buffer_size, void** source_buffers,
void** destination_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
std::nullopt, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(source_buffers,
sizeof(void*) * num_buffers);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(destination_buffers,
sizeof(void*) * num_buffers);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllToAll(
rendezvous_key, buffer_size,
absl::Span<const void* const>(source_buffers, num_buffers),
absl::Span<void* const>(destination_buffers, num_buffers),
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllGatherImpl(const ExecutableRunOptions* run_options,
int32_t channel_id_present, int32_t use_global_device_ids,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int64_t buffer_size,
void* source_buffer, void* destination_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->AllGather(rendezvous_key, buffer_size,
source_buffer, destination_buffer,
DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void ReduceScatterImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size,
int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, int32_t element_type,
int64_t chunk_elems, void* input_buffer,
void* output_buffer) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
TF_CHECK_OK(communicator->ReduceScatter(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
static_cast<PrimitiveType>(element_type), chunk_elems, input_buffer,
output_buffer, DefaultCollectiveTimeout()));
}
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
void AllReduceImpl(const ExecutableRunOptions* run_options,
const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t channel_id_present,
int32_t use_global_device_ids, int64_t op_id,
int32_t reduction_kind, const void* shape_ptr,
int32_t shape_length, int32_t num_buffers,
void** input_buffers, void** output_buffers) {
GlobalDeviceId device(GetDeviceOrdinal(run_options));
std::string_view replica_groups_serialized(
static_cast<const char*>(replica_groups_str), replica_groups_str_size);
std::vector<ReplicaGroup> group =
ParseReplicaGroupsOnly(replica_groups_serialized).value();
RendezvousKey rendezvous_key =
GetRendezvousKey(run_options, device, group, channel_id_present,
use_global_device_ids, op_id);
auto shape_str = ShapeString(shape_ptr, shape_length);
VLOG(2) << "All-reduce input/output shape : " << shape_str;
Shape shape =
DecodeSelfDescribingShapeConstant(shape_ptr, shape_length).value();
CHECK((num_buffers > 1 && shape.IsTuple()) ||
(num_buffers == 1 && LayoutUtil::IsDenseArray(shape)));
int rank = RankInGlobalDevices(rendezvous_key.global_devices, device).value();
CollectivesInterface* collectives = GetCollectivesImpl(run_options);
auto communicator =
collectives->GetCommunicator(rendezvous_key.global_devices, rank).value();
for (int i = 0; i < num_buffers; i++) {
Shape subshape = num_buffers == 1 ? shape : shape.tuple_shapes(i);
TF_CHECK_OK(communicator->AllReduce(
rendezvous_key, static_cast<ReductionKind>(reduction_kind),
subshape.element_type(), ShapeUtil::ElementsIn(subshape),
input_buffers[i], output_buffers[i], DefaultCollectiveTimeout()));
}
} | #define EIGEN_USE_THREADS
#include "xla/service/cpu/cpu_runtime.h"
#include <memory>
#include <string>
#include <tuple>
#include "absl/strings/str_format.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/array2d.h"
#include "xla/client/local_client.h"
#include "xla/service/cpu/runtime_custom_call_status.h"
#include "xla/service/cpu/runtime_matmul.h"
#include "xla/service/cpu/runtime_matmul_acl.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/types.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class CpuRuntimeTest : public ::testing::Test {};
template <typename T>
std::unique_ptr<Array2D<float>> MaybeTransposeArray2D(const Array2D<T>& array,
bool transpose) {
int64_t output_height = array.height();
int64_t output_width = array.width();
if (transpose) {
std::swap(output_width, output_height);
}
auto output = std::make_unique<Array2D<float>>(output_height, output_width);
for (int y = 0; y < array.height(); y++) {
for (int x = 0; x < array.width(); x++) {
if (transpose) {
(*output)(x, y) = array(y, x);
} else {
(*output)(y, x) = array(y, x);
}
}
}
return output;
}
void CheckMatrixMultiply(const Array2D<float>& a, const Array2D<float>& b,
const Array2D<float>& c) {
for (int i = 0; i < a.height(); ++i) {
for (int j = 0; j < b.width(); ++j) {
float sum = 0.0;
for (int k = 0; k < a.width(); ++k) {
sum += a(i, k) * b(k, j);
}
EXPECT_NEAR(sum, c(i, j), 0.01);
}
}
}
std::unique_ptr<Array2D<float>> EigenMatrixMultiply(const Array2D<float>& a,
const Array2D<float>& b,
bool transpose_lhs,
bool transpose_rhs,
bool single_threaded) {
CHECK_EQ(a.width(), b.height());
int64_t m = a.height();
int64_t n = b.width();
int64_t k = a.width();
auto a_transpose = MaybeTransposeArray2D(a, !transpose_lhs);
auto b_transpose = MaybeTransposeArray2D(b, !transpose_rhs);
auto c_transpose = std::make_unique<Array2D<float>>(n, m);
if (single_threaded) {
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(
nullptr, c_transpose->data(), a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
} else {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "XLAEigen", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
ExecutableRunOptions run_options;
run_options.set_intra_op_thread_pool(&device);
__xla_cpu_runtime_EigenMatMulF32(&run_options, c_transpose->data(),
a_transpose->data(), b_transpose->data(),
m, n, k, transpose_lhs, transpose_rhs);
}
return MaybeTransposeArray2D(*c_transpose, true);
}
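// Editorial note on the double-transpose dance above: the runtime kernels
// compute a column-major product, while Array2D stores row-major data, so a
// row-major buffer of the transposed matrix doubles as a column-major view of
// the original. Transposing the (n, m) result back therefore yields the
// ordinary row-major product of a and b.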
struct MatMulShape {
int64_t m;
int64_t k;
int64_t n;
};
MatMulShape MatMulShapes[] = {
MatMulShape{2, 2, 3}, MatMulShape{256, 512, 1024},
MatMulShape{128, 128, 1}, MatMulShape{1, 128, 128},
MatMulShape{1, 32, 128}, MatMulShape{1, 32, 16},
MatMulShape{32, 16, 1}, MatMulShape{32, 128, 1},
};
using MatMulTestParam = std::tuple<MatMulShape, bool, bool, bool>;
class EigenMatMulTest : public CpuRuntimeTest,
public ::testing::WithParamInterface<MatMulTestParam> {
public:
static std::string Name(
const ::testing::TestParamInfo<MatMulTestParam>& info) {
MatMulShape shape = std::get<0>(info.param);
bool transpose_lhs = std::get<1>(info.param);
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
transpose_rhs ? "Trhs_" : "",
single_threaded ? "single" : "multi");
}
};
TEST_P(EigenMatMulTest, DoIt) {
MatMulShape shape = std::get<0>(GetParam());
bool transpose_lhs = std::get<1>(GetParam());
bool transpose_rhs = std::get<2>(GetParam());
bool single_threaded = std::get<3>(GetParam());
auto a = MakeLinspaceArray2D(0.0, 1.0, shape.m, shape.k);
auto b = MakeLinspaceArray2D(-2.0, 2.0, shape.k, shape.n);
auto c = EigenMatrixMultiply(*a, *b, transpose_lhs, transpose_rhs,
single_threaded);
CheckMatrixMultiply(*a, *b, *c);
}
INSTANTIATE_TEST_SUITE_P(EigenMatMulTestInstantiaion, EigenMatMulTest,
::testing::Combine(::testing::ValuesIn(MatMulShapes),
::testing::Bool(),
::testing::Bool(),
::testing::Bool()),
EigenMatMulTest::Name);
TEST_F(CpuRuntimeTest, SuccessStatus) {
XlaCustomCallStatus success_status;
ASSERT_TRUE(__xla_cpu_runtime_StatusIsSuccess(&success_status));
}
TEST_F(CpuRuntimeTest, FailureStatus) {
  XlaCustomCallStatus failure_status;
  XlaCustomCallStatusSetFailure(&failure_status, "Failed", 6);
  ASSERT_FALSE(__xla_cpu_runtime_StatusIsSuccess(&failure_status));
}
}
} | 2,011 |
#ifndef XLA_SERVICE_CPU_CPU_INSTRUCTION_FUSION_H_
#define XLA_SERVICE_CPU_CPU_INSTRUCTION_FUSION_H_
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
namespace xla {
namespace cpu {
class CpuInstructionFusion : public InstructionFusion {
public:
CpuInstructionFusion()
: InstructionFusion(CpuInstructionFusion::IsExpensive) {}
~CpuInstructionFusion() override = default;
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
fusion_node_evaluations_.clear();
return InstructionFusion::Run(module, execution_threads);
}
protected:
FusionDecision ShouldFuse(HloInstruction* consumer,
int64_t operand_index) override;
HloInstruction::FusionKind ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) override;
private:
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override;
absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
fusion_node_evaluations_;
};
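// Usage sketch (editorial; in production the pass is scheduled inside the CPU
// compiler's HloPassPipeline rather than run standalone):
//
//   CpuInstructionFusion fusion;
//   absl::StatusOr<bool> changed = fusion.Run(module);  // HloPassInterface
//                                                       // overload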
}
}
#endif
#include "xla/service/cpu/cpu_instruction_fusion.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
namespace xla {
namespace cpu {
namespace {
bool CanBeLoopFused(const HloInstruction& hlo) {
return hlo.IsElementwise() ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kConcatenate ||
hlo.opcode() == HloOpcode::kDynamicSlice ||
hlo.opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo.opcode() == HloOpcode::kGather ||
hlo.opcode() == HloOpcode::kIota || hlo.opcode() == HloOpcode::kPad ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kReshape ||
hlo.opcode() == HloOpcode::kReverse ||
hlo.opcode() == HloOpcode::kSlice ||
hlo.opcode() == HloOpcode::kTranspose;
}
bool IsNonComplexNonBatchedMatrixVectorDot(const HloInstruction* hlo) {
const Shape& hlo_shape = hlo->shape();
return !ShapeUtil::ElementIsComplex(hlo_shape) &&
hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 &&
hlo->dot_dimension_numbers().lhs_batch_dimensions_size() == 0;
}
bool HasExactlyOneUse(const HloInstruction& hlo_instr) {
return hlo_instr.user_count() == 1 &&
absl::c_count(hlo_instr.users().front()->operands(), &hlo_instr) == 1;
}
bool CanBeOutputFused(const HloInstruction* producer,
const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
IsNonComplexNonBatchedMatrixVectorDot(producer) &&
         HasExactlyOneUse(*producer);
}
bool CanBeOutputFusedIntoSomeOperand(const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
(CanBeOutputFused(consumer->operand(0), consumer) ||
CanBeOutputFused(consumer->operand(1), consumer));
}
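// Editorial sketch of the pattern the two predicates above accept: a
// non-complex, non-batched matrix-vector dot whose single use is an add,
// e.g. (HLO, illustrative)
//
//   d = f32[1024] dot(f32[1024,256] a, f32[256] b),
//       lhs_contracting_dims={1}, rhs_contracting_dims={0}
//   r = f32[1024] add(d, c)   // eligible for kOutput fusion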
}
FusionDecision CpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
int64_t operand_index) {
HloInstruction* producer = consumer->mutable_operand(operand_index);
VLOG(2) << "Considering for fusion: operand " << operand_index << " of "
<< consumer->ToString();
constexpr int kFusionThresholdBytes = 16 * 1024;
if (CanBeOutputFused(producer, consumer)) {
VLOG(2) << "Fusion OK: Can create output fusion.";
return {};
}
if (CanBeOutputFusedIntoSomeOperand(producer)) {
return "Bailing because producer can be output-fused into some operand.";
}
if (!CanBeLoopFused(*producer)) {
return "Producer is not loop-fusible.";
}
if (producer->opcode() != HloOpcode::kFusion && is_expensive(*producer) &&
ReusesOperandElements(consumer, operand_index)) {
return "Fusion is not profitable.";
}
RETURN_IF_NOT_FUSIBLE(InstructionFusion::ShouldFuse(consumer, operand_index));
if (producer->opcode() == HloOpcode::kConstant &&
consumer->opcode() != HloOpcode::kFusion) {
return "Not fusing: insufficient non-constant nodes.";
}
if (producer->opcode() == HloOpcode::kFusion) {
return "Not fusing: producer is itself a fusion node.";
}
if (consumer->opcode() == HloOpcode::kFusion) {
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
if (fusion_node_evaluations_.at(consumer).CodeDuplicationTooHigh(
producer)) {
return "Code duplication too high";
}
}
if (consumer->opcode() == HloOpcode::kDot) {
const Shape& output_shape = consumer->shape();
if (output_shape.dimensions_size() <= 1) {
if (consumer->operand(0)->shape().rank() == 1 && operand_index == 1 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(0)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return {};
} else if (consumer->operand(1)->shape().rank() == 1 &&
operand_index == 0 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(1)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return {};
}
}
}
if (consumer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
consumer->dimensions(),
LayoutUtil::Minor(consumer->operand(0)->shape().layout(), 0))) {
return "Not fusing reductions over major dimensions";
}
if (producer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
producer->dimensions(),
LayoutUtil::Minor(producer->operand(0)->shape().layout(), 0))) {
return "Not fusing reductions over major dimensions";
}
if (consumer->IsLoopFusion()) {
VLOG(2) << "Fusing: consumer is a fusion node.";
return {};
}
if (CanBeLoopFused(*consumer)) {
VLOG(2) << "Fusing: consumer is elementwise or fusible.";
return {};
}
return "Not fusing: not found a fusible case";
}
HloInstruction::FusionKind CpuInstructionFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
return CanBeOutputFused(producer, consumer)
? HloInstruction::FusionKind::kOutput
: HloInstruction::FusionKind::kLoop;
}
HloInstruction* CpuInstructionFusion::FuseInstruction(
HloInstruction* fusion_instruction, HloInstruction* producer) {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation = fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
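// Editorial note: FuseInstruction keeps fusion_node_evaluations_ incremental:
// RemoveFusionOperand drops the producer's entry before the base class
// splices it into the fusion, and UpdateEvaluationCache re-registers the
// fused clone, so later CodeDuplicationTooHigh checks in ShouldFuse see
// up-to-date indexing costs without re-walking the fusion computation.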
}
} | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include <algorithm>
#include <memory>
#include <set>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/transpose_folding.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
namespace {
using InstructionFusionTest = HloTestBase;
std::unique_ptr<HloInstruction> MakeDot(const Shape& shape, HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(lhs->shape().rank() - 1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
precision_config);
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_0) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1024, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kExp, arg0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), exp0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_1) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Bitcast) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* bitcast0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kBitcast, exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), bitcast0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1024, 256}), exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), reshape0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TooLarge) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {32 * 1024}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32 * 1024, 256}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {32 * 1024, 256}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {256}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_ElementReuse) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {2, 1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_RHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[1024,256] parameter(1)
exponential = f32[1024,256] exponential(arg1)
transpose = f32[256,1024] transpose(exponential), dimensions={1,0}
ROOT dot = f32[1,1024] dot(arg0, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_LHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[256,1] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[1,256] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/0, /*rhs_contracting_dim=*/0));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_TransposeFusion_LHS_NonDefault) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[256,1] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/0));
}
class OpcodeFusionTest : public InstructionFusionTest {
protected:
void RunFusionAndCheckOpcodesWereFused(
HloModule* module, const std::multiset<HloOpcode>& expected_opcodes,
HloInstruction::FusionKind fusion_kind =
HloInstruction::FusionKind::kLoop) {
auto computation = module->entry_computation();
auto did_fusion = CpuInstructionFusion().Run(module);
ASSERT_TRUE(did_fusion.ok());
EXPECT_TRUE(did_fusion.value());
HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Fusion());
EXPECT_EQ(root->fusion_kind(), fusion_kind);
std::vector<HloOpcode> fused_opcodes(root->fused_instruction_count());
std::transform(root->fused_instructions().begin(),
root->fused_instructions().end(), fused_opcodes.begin(),
[](const HloInstruction* hlo) { return hlo->opcode(); });
EXPECT_EQ(
std::multiset<HloOpcode>(fused_opcodes.begin(), fused_opcodes.end()),
expected_opcodes);
}
HloComputation* CreateAdderToOne(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, arg0, one));
return module->AddEmbeddedComputation(builder.Build());
}
HloComputation* CreateMax(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* arg1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "arg1"));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, arg0, arg1));
return module->AddEmbeddedComputation(builder.Build());
}
};
TEST_F(OpcodeFusionTest, Exponential_Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {1, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* reshape2 =
builder.AddInstruction(HloInstruction::CreateReshape(result_shape, exp1));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Reshape_DynamicSlice_Tanh) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape starts_shape = ShapeUtil::MakeShape(S32, {});
Shape broadcast_shape = ShapeUtil::MakeShape(F32, {1, 8, 8});
Shape reshape_shape = ShapeUtil::MakeShape(F32, {8, 8});
Shape dynamic_slice_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, starts_shape, "starts"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, starts_shape, "starts"));
HloInstruction* broadcast2 = builder.AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, param0, {1}));
HloInstruction* reshape3 = builder.AddInstruction(
HloInstruction::CreateReshape(reshape_shape, broadcast2));
HloInstruction* dynamic_slice4 =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
dynamic_slice_shape, reshape3, {param1, param2}, {4, 4}));
builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_slice_shape, HloOpcode::kTanh, dynamic_slice4));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kTanh, HloOpcode::kDynamicSlice, HloOpcode::kReshape,
HloOpcode::kBroadcast, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape result_shape = ShapeUtil::MakeShape(F32, {8, 8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* broadcast1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(result_shape, param0, {1}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, broadcast1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kBroadcast, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, DynamicSlice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(S32, {});
Shape result_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, slice_shape, "starts"));
HloInstruction* dynamic_slice2 = builder.AddInstruction(
HloInstruction::CreateDynamicSlice(result_shape, param0, {param1}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, dynamic_slice2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kDynamicSlice,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, exp1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kExp, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {16});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(result_shape, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reverse_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reverse1 = builder.AddInstruction(
HloInstruction::CreateReverse(param_shape, param0, {0}));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, reverse1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReverse, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Slice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* slice1 = builder.AddInstruction(
HloInstruction::CreateSlice(slice_shape, param0, {0}, {4}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2}), HloOpcode::kNegate, slice1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kSlice, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Transpose_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {3, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* transpose2 = builder.AddInstruction(
HloInstruction::CreateTranspose(result_shape, exp1, {1, 0}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, transpose2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kTranspose, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, UnaryMapOfExp) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp}, CreateAdderToOne(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, BinaryMapOfExps) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param"));
HloInstruction* exp0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param1));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp0, exp1}, CreateMax(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kExp, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, DynamicSliceWithDynamicUpdateSlice) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 1, 1000});
std::vector<HloInstruction*> slice_indices, update_indices;
for (int i = 0; i < 3; ++i) {
slice_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
1 + i, ShapeUtil::MakeShape(U32, {}), "slice_indices")));
update_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
5 + i, ShapeUtil::MakeShape(U32, {}), "update_indices")));
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(0, full_shape, "slice_from")),
slice_indices,
{10, 1, 1000}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "to_update")),
slice, update_indices));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicUpdateSlice,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, MessOfFusibleNodes) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {4, 100, 10, 100, 50});
auto loop_idx = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param0"));
auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "param1"));
auto idx_choice = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}),
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}),
builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(S32, {4}), "param2")),
{loop_idx},
{1}))));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
auto slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(F32, {1, 100, 10, 100, 50}),
builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {100, 100, 10, 100, 50}), "param3")),
{idx_choice, zero, zero, zero, zero},
{1, 100, 10, 100, 50}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "param4")),
slice, {loop_idx, param1, param1, param1, param1}));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicSlice,
HloOpcode::kDynamicUpdateSlice, HloOpcode::kReshape,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter});
}
void CreateComputationForDotAddOutputFusionTest(const std::string& test_name,
HloModule* module, int m, int k,
int n,
bool add_extra_use_for_dot) {
HloComputation::Builder builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
auto* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
auto* dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_rhs_shape, "param1"));
auto* addend = builder.AddInstruction(
HloInstruction::CreateParameter(2, dot_shape, "param2"));
auto* dot =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
builder.AddInstruction(
HloInstruction::CreateBinary(dot_shape, HloOpcode::kAdd, dot, addend));
if (add_extra_use_for_dot) {
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
HloInstruction::CreateOutfeed(dot_shape, dot, token, "no_config"));
}
module->AddEntryComputation(builder.Build());
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_1x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(), /*m=*/1,
                                             /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1_multi_use) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_DontOutputFuseDuplicateOperands) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[60,1]{1,0} parameter(1)
c = f32[50,1]{1,0} dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT d = f32[50,1]{1,0} add(c, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
struct GatherLoopFusionTestSpec {
std::string test_name;
std::string hlo_computation_text;
static std::string Name(
const ::testing::TestParamInfo<GatherLoopFusionTestSpec>& info) {
return info.param.test_name;
}
};
class GatherLoopFusionTest
: public OpcodeFusionTest,
public ::testing::WithParamInterface<GatherLoopFusionTestSpec> {};
TEST_P(GatherLoopFusionTest, GatherLoopFusion) {
const GatherLoopFusionTestSpec& spec = GetParam();
std::string hlo_string = absl::StrCat("HloModule ", spec.test_name, "\n\n",
spec.hlo_computation_text);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kGather, HloOpcode::kAdd, HloOpcode::kBroadcast,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter});
}
std::vector<GatherLoopFusionTestSpec> GetGatherLoopFusionTestSpecs() {
std::vector<GatherLoopFusionTestSpec> result;
result.push_back({"FusedTensorFlowGatherV2", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[3,2] broadcast(one), dimensions={}
ROOT result = s32[3,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2, | 2,012 |
#ifndef XLA_SERVICE_CPU_ONEDNN_MATMUL_H_
#define XLA_SERVICE_CPU_ONEDNN_MATMUL_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "dnnl.hpp"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/shape.h"
namespace xla {
namespace cpu {
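// Returns the flattened XLA shape of the weights layout that oneDNN prefers
// for the given matmul, so weights can be pre-packed ahead of execution.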
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config);
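// Runtime entry points invoked from XLA:CPU generated code; `args` is a
// packed argument list whose layout is documented at the definitions.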
extern "C" {
extern void __xla_cpu_runtime_OneDnnMatMul(void* result, void* scratch,
void** args);
extern void __xla_cpu_runtime_OneDnnMatMulReorder(void* result, void** args);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_matmul.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "dnnl.hpp"
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
#define EIGEN_USE_THREADS
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::matmul;
using dnnl::memory;
using dnnl::stream;
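// Queries oneDNN for its preferred weights memory descriptor by building a
// matmul primitive descriptor with the weights format left as `any`.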
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const dnnl::memory::desc& input_md,
const dnnl::memory::desc& weights_md, const dnnl::memory::desc& bias_md,
const dnnl::memory::desc& output_md) {
auto weights_any_md =
memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
dnnl::memory::format_tag::any);
auto matmul_pd = matmul::primitive_desc(engine, input_md, weights_any_md,
bias_md, output_md);
return matmul_pd.weights_desc();
}
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const Shape& input_shape,
const Shape& weights_shape, const Shape& bias_shape,
const Shape& output_shape, const OneDnnMatMulConfig* matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_b(), weights_md);
auto bias_md = absl::c_count(matmul_config->fusions().ops(),
OneDnnFusionConfig::BIAS) > 0
? ShapeToMemDesc(bias_shape)
: dnnl::memory::desc{};
auto output_md = ShapeToMemDesc(output_shape);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (!bias_md.is_zero() && missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
return OneDnnMatMulOptWeightsDesc(engine, input_md, weights_md, bias_md,
output_md);
}
}
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config) {
engine cpu_engine(engine::kind::cpu, 0);
auto optimized_weights_md =
OneDnnMatMulOptWeightsDesc(cpu_engine, input_shape, weights_shape,
bias_shape, output_shape, matmul_config);
return MemDescToXlaShapeFlattened(optimized_weights_md);
}
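// Bundles the raw buffers of fused operands with the post-op argument list
// that must be handed to the matmul primitive at execution time.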
struct FusedOperandsRef {
const std::vector<void*>& bufs;
std::vector<std::pair<int, dnnl::memory>>& postop_args;
};
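// Builds the matmul primitive descriptor, translating the fusion config
// into oneDNN post-ops (eltwise epilogues, bias, binary add). When
// `fused_operands_ref` is non-null, the memory bindings for fused operands
// are recorded so the caller can pass them to execute().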
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const engine& cpu_engine, const memory::desc& input_md,
const memory::desc& plain_weights_md, const memory::desc& output_md,
const std::vector<memory::desc>& fused_mds,
const OneDnnMatMulConfig& matmul_config,
FusedOperandsRef* fused_operands_ref = nullptr) {
auto bias_md = memory::desc();
bool weights_packed = matmul_config.optimization_config().weights_prepacked();
auto weights_md = plain_weights_md;
if (weights_packed) {
weights_md = memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
memory::format_tag::any);
}
dnnl::post_ops post_ops;
int fused_operand_idx = 0;
for (auto& fused_op : matmul_config.fusions().ops()) {
switch (fused_op) {
case OneDnnFusionConfig::RELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_relu, 0.f, 0.f);
break;
case OneDnnFusionConfig::TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_ERF:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_erf, 0.f, 0.f);
break;
case OneDnnFusionConfig::RELU6:
post_ops.append_eltwise(dnnl::algorithm::eltwise_clip_v2, 0.f, 6.0f);
break;
case OneDnnFusionConfig::SIGMOID:
post_ops.append_eltwise(dnnl::algorithm::eltwise_logistic, 0.f, 0.f);
break;
case OneDnnFusionConfig::BIAS: {
bias_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
if (fused_operands_ref) {
fused_operands_ref->postop_args.emplace_back(
DNNL_ARG_BIAS,
dnnl::memory(bias_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
fused_operand_idx++;
} break;
case OneDnnFusionConfig::ELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_elu, 1.0f, 0.0f);
break;
case OneDnnFusionConfig::BINARY_ADD: {
auto binary_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - binary_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto binary_dims = binary_md.get_dims();
binary_dims.insert(binary_dims.begin(), missed_rank, 1);
binary_md = binary_md.reshape(binary_dims);
}
if (fused_operands_ref) {
auto arg_idx =
DNNL_ARG_ATTR_MULTIPLE_POST_OP(post_ops.len()) | DNNL_ARG_SRC_1;
fused_operands_ref->postop_args.emplace_back(
arg_idx,
dnnl::memory(binary_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
post_ops.append_binary(dnnl::algorithm::binary_add, binary_md);
fused_operand_idx++;
} break;
case OneDnnFusionConfig::LINEAR: {
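// `alpha_typecast` carries the IEEE-754 bit pattern of a float inside an
// int32 proto field, so reinterpret the bits rather than value-convert.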
float const_float;
*(reinterpret_cast<int32_t*>(&const_float)) =
matmul_config.fusions().alpha_typecast();
post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, const_float,
0.f);
} break;
default:
LOG(FATAL) << __FILE__ << ":" << __LINE__
<< " Attempt to call OneDNN MatMul runtime library with "
"unsupported post op."
<< std::endl;
}
}
dnnl::primitive_attr attrs;
if (matmul_config.optimization_config().user_scratchpad()) {
attrs.set_scratchpad_mode(dnnl::scratchpad_mode::user);
}
if (post_ops.len() > 0) {
attrs.set_post_ops(post_ops);
}
return std::make_unique<matmul::primitive_desc>(
cpu_engine, input_md, weights_md, bias_md, output_md, attrs);
}
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const Shape& input_shape, const Shape& weights_shape,
const Shape& output_shape, const std::vector<Shape>& fused_shapes,
const OneDnnMatMulConfig& matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weights_md);
auto output_md = ShapeToMemDesc(output_shape);
std::vector<memory::desc> fused_mds;
std::transform(fused_shapes.begin(), fused_shapes.end(),
std::back_inserter(fused_mds),
[](const Shape& shape) { return ShapeToMemDesc(shape); });
return CreateMatMulPrimDesc(engine(engine::kind::cpu, 0), input_md,
weights_md, output_md, fused_mds, matmul_config);
}
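// Rebuilds the primitive descriptor for a __onednn$matmul custom call:
// operands[0]/operands[1] are input and weights, and any remaining
// operands are fused arguments.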
template <>
std::unique_ptr<dnnl::matmul::primitive_desc>
CreateOneDnnPrimDesc<dnnl::matmul::primitive_desc>(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kCustomCall) {
return nullptr;
}
auto custom_call = Cast<xla::HloCustomCallInstruction>(instr);
auto backend_config = custom_call->backend_config<BackendConfig>();
if (!backend_config.ok()) {
return nullptr;
}
auto& matmul_config = backend_config.value().onednn_matmul_config();
auto operands = custom_call->operands();
auto input = operands[0];
auto weight = operands[1];
auto input_shape = input->shape();
auto weight_shape = weight->shape();
auto output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
auto fused_operands =
HloInstruction::InstructionVector(operands.begin() + 2, operands.end());
std::vector<Shape> fused_shapes;
std::transform(fused_operands.begin(), fused_operands.end(),
std::back_inserter(fused_shapes),
[](const HloInstruction* instr) { return instr->shape(); });
return CreateMatMulPrimDesc(input_shape, weight_shape, output_shape,
fused_shapes, matmul_config);
}
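// Runtime matmul entry point. Layout of `args`:
//   args[0]  -> int64_t num_args
//   args[1]  -> const xla::ExecutableRunOptions*
//   args[2]  -> serialized OneDnnMatMulConfig
//   args[3]  -> input MemrefInfo, args[4] -> weights MemrefInfo
//   args[5+] -> MemrefInfos of fused operands
// `result` is the output memref; `scratch` is only consulted when the
// config requests a user scratchpad.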
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMul(
void* result, void* scratch, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weights_minfo(args[arg_indx++]);
MemrefInfo output_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weights_md = weights_minfo.GetOneDnnMemDesc();
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_a() && input_md.get_ndims() > 1, input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(
matmul_config.transpose_b() && weights_md.get_ndims() > 1, weights_md);
auto output_md = output_minfo.GetOneDnnMemDesc();
if (matmul_config.optimization_config().weights_prepacked()) {
weights_md =
memory::desc({input_md.get_dims().back(), output_md.get_dims().back()},
weights_md.get_data_type(), memory::format_tag::ab);
}
const int64_t num_fused_operands = num_args - arg_indx;
std::vector<memory::desc> fused_mds;
std::vector<void*> fused_bufs;
for (int64_t i = 0; i < num_fused_operands; ++i) {
MemrefInfo operand_minfo(args[arg_indx++]);
fused_mds.push_back(operand_minfo.GetOneDnnMemDesc());
fused_bufs.push_back(operand_minfo.Data());
}
std::vector<std::pair<int, dnnl::memory>> postop_args;
FusedOperandsRef fused_operands_ref{fused_bufs, postop_args};
auto matmul_pd =
CreateMatMulPrimDesc(cpu_engine, input_md, weights_md, output_md,
fused_mds, matmul_config, &fused_operands_ref);
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
auto lhs_mem = memory(input_md, cpu_engine, input_minfo.Data());
auto rhs_mem =
memory(matmul_pd->weights_desc(), cpu_engine, weights_minfo.Data());
auto result_mem = memory(output_md, cpu_engine, output_minfo.Data());
if (std::strstr(matmul_pd->impl_info_str(), "ref") != nullptr) {
LOG(WARNING) << "[Perf]: MatMul reference implementation being executed";
}
auto matmul_prim = matmul(*matmul_pd);
std::unordered_map<int, memory> matmul_args{{DNNL_ARG_SRC, lhs_mem},
{DNNL_ARG_WEIGHTS, rhs_mem},
{DNNL_ARG_DST, result_mem}};
if (matmul_config.optimization_config().user_scratchpad()) {
XLA_LIGHTWEIGHT_CHECK(scratch != nullptr);
MemrefInfo scratch_minfo(scratch);
auto scratchpad_md = matmul_pd->scratchpad_desc();
auto scratch_mem = memory(scratchpad_md, cpu_engine, scratch_minfo.Data());
matmul_args.insert({DNNL_ARG_SCRATCHPAD, scratch_mem});
}
matmul_args.insert(postop_args.begin(), postop_args.end());
matmul_prim.execute(onednn_stream, matmul_args);
}
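// Re-packs plain weights into the optimized layout computed by
// OneDnnMatMulOptWeightsDesc using a dnnl::reorder primitive.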
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMulReorder(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
auto thread_pool = CreateOneDnnThreadPool(
run_options ? run_options->intra_op_thread_pool() : nullptr);
engine cpu_engine(engine::kind::cpu, 0);
auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnMatMulConfig matmul_config;
matmul_config.ParseFromString(config_str);
MemrefInfo input_minfo(args[arg_indx++]);
MemrefInfo weight_minfo(args[arg_indx++]);
MemrefInfo output_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto input_md = input_minfo.GetOneDnnMemDesc();
auto weight_md = weight_minfo.GetOneDnnMemDesc();
auto output_md = output_minfo.GetOneDnnMemDesc();
auto bias_md = dnnl::memory::desc{};
if (absl::c_count(matmul_config.fusions().ops(), OneDnnFusionConfig::BIAS) >
0) {
MemrefInfo bias_minfo(args[arg_indx++]);
bias_md = bias_minfo.GetOneDnnMemDesc();
}
XLA_LIGHTWEIGHT_CHECK(num_args >= arg_indx);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weight_md);
if (!bias_md.is_zero()) {
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
}
auto result_md = OneDnnMatMulOptWeightsDesc(cpu_engine, input_md, weight_md,
bias_md, output_md);
XLA_LIGHTWEIGHT_CHECK(result_minfo.GetOneDnnMemDesc().get_size() ==
result_md.get_size());
auto weight_mem = dnnl::memory{weight_md, cpu_engine, weight_minfo.Data()};
auto result_mem = dnnl::memory{result_md, cpu_engine, result_minfo.Data()};
dnnl::reorder rdr{weight_mem, result_mem};
rdr.execute(onednn_stream, weight_mem, result_mem);
onednn_stream.wait();
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_matmul_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
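// FileCheck patterns shared by the tests below; each asserts that the
// rewriter replaced a dot (plus epilogue) with a __onednn$matmul custom
// call carrying the expected fusion list in its backend config.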
class MatmulTest : public HloTestBase {
protected:
const char* fused_matmul_bias_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_binary_add_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* matmul_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_tanh_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_gelu_erf_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_elu_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","ELU"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_tanh_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_relu6_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","RELU6"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
const char* fused_matmul_bias_sigmoid_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["BIAS","SIGMOID"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)";
};
TEST_F(MatmulTest, SimpleTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f32[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = bf16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = bf16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg.0 = f16[32,8,128,64] parameter(0), parameter_replication={false}
arg.1 = f16[32,8,64,128] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeB) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,128,64]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,128,64]{3,1,2,0} parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
reshape.2 = f32[32,32,40,30] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[32,32,30,40] broadcast(constant.3), dimensions={}
dot.7 = f32[32,32,40,40] dot(reshape.2, broadcast.4), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
constant.5 = f32[] constant(15)
broadcast.6 = f32[40] broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,32,40,40] broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broadcast.9)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[400,300] parameter(0), parameter_replication={false}
reshape.2 = f32[400,300] reshape(arg0.1)
constant.3 = f32[] constant(1)
broadcast.4 = f32[300,400] broadcast(constant.3), dimensions={}
dot.7 = f32[400,400] dot(reshape.2, broadcast.4), lhs_batch_dims={}, lhs_contracting_dims={1}, rhs_batch_dims={}, rhs_contracting_dims={0}
reshape.1 = f32[400,1,400] reshape(dot.7)
constant.5 = f32[] constant(15)
broadcast.6 = f32[400] broadcast(constant.5), dimensions={}
broadcast.9 = f32[400,1,400] broadcast(broadcast.6), dimensions={2}
add.10 = f32[400,1,400] add(reshape.1, broadcast.9)
tuple.12 = (f32[400,1,400]) tuple(add.10)
ROOT get-tuple-element.13 = f32[400,1,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter1) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[32,32,40,40] parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
add.10 = f32[32,32,40,40] add(dot.7, arg0.3)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
arg0.3 = f32[40]{0} parameter(2), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[32,32,40,40] broadcast(arg0.3), dimensions={3}
add.10 = f32[32,32,40,40] add(dot.7, broad.1)
reshape.11 = f32[32,32,40,40] reshape(add.10)
tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[2,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[2,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[2,400] parameter(2), parameter_replication={false}
dot.7 = f32[2,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[2,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[2,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[2,2,400,400] reshape(add.10)
tuple.12 = (f32[2,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[2,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D1B) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[1,2,400,30] parameter(0), parameter_replication={false}
arg0.2 = f32[1,2,30,400] parameter(1), parameter_replication={false}
arg0.3 = f32[1,400] parameter(2), parameter_replication={false}
dot.7 = f32[1,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
broad.1 = f32[1,2,400,400] broadcast(arg0.3), dimensions={0,3}
add.10 = f32[1,2,400,400] add(dot.7, broad.1)
reshape.11 = f32[1,2,400,400] reshape(add.10)
tuple.12 = (f32[1,2,400,400]) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[1,2,400,400] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter3) {
const char* matmul_module_str = R"(
HloModule matmul.biasadd.test.f32
ENTRY matmul.biasadd.test.f32 {
arg0.1 = f32[16,128,768] parameter(0), sharding={replicated}
arg0.2 = f32[768,768] parameter(1), sharding={replicated}
dot.84 = f32[16,128,768] dot(arg0.1, arg0.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
arg0.3 = f32[768]{0} parameter(2), sharding={replicated}
reshape.85 = f32[1,1,768] reshape(arg0.3)
broadcast.86 = f32[1,1,768] broadcast(reshape.85), dimensions={0,1,2}
reshape.87 = f32[768]{0} reshape(broadcast.86)
broadcast.88 = f32[16,128,768] broadcast(reshape.87), dimensions={2}
ROOT add.89 = f32[16,128,768] add(dot.84, broadcast.88)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
TEST_F(MatmulTest, SimpleTestF32TransposeBWithBiasAddFusion) {
const char* matmul_module_str = R"(
HloModule matmul.test.1
ENTRY matmul.test.1 {
arg.0 = f32[32,8,4,16]{3,1,2,0} parameter(0), parameter_replication={false}
arg.1 = f32[32,8,16,16]{3,1,2,0} parameter(1), parameter_replication={false}
dot.7 = f32[32,8,4,16]{3,2,1,0} dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
constant.5 = f32[] constant(15)
broadcast.6 = f32[16]{0} broadcast(constant.5), dimensions={}
broadcast.9 = f32[32,8,4,16]{3,2,1,0} broadcast(broadcast.6), dimensions={3}
add.10 = f32[32,8,4,16]{3,2,1,0} add(dot.7, broadcast.9)
reshape.11 = f32[32,8,4,16]{3,2,1,0} reshape(add.10)
tuple.12 = (f32[32,8,4,16]{3,2,1,0}) tuple(reshape.11)
ROOT get-tuple-element.13 = f32[32,8,4,16]{3,2,1,0} get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
TEST_F(MatmulTest, F32BiasAddFusionNonCompatibleBias) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.1 {
arg.0 = f32[12288,2] parameter(0), parameter_replication={false}
arg.1 = f32[2,1024] parameter(1), parameter_replication={false}
dot.0 = f32[12288,1024] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.0 = f32[32,384,1024] reshape(dot.0)
constant.0 = f32[1,384,1024] constant(15)
reshape.1 = f32[384,1024] reshape(constant.0)
broadcast.0 = f32[32,384,1024] broadcast(reshape.1), dimensions={1,2}
add.0 = f32[32,384,1024] add(reshape.0, broadcast.0)
tuple.0 = (f32[32,384,1024]) tuple(add.0)
ROOT get-tuple-element.0 = f32[32,384,1024] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
TEST_F(MatmulTest, ApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, onednn.matmul.0)
mul.1 = f32[32,32,4,32] multiply(onednn.matmul.0, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.2 = f32[32,32,4,32] multiply(mul.1, bcast.0)
add.0 = f32[32,32,4,32] add(onednn.matmul.0, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
mul.3 = f32[32,32,4,32] multiply(add.0, bcast.1)
tanh = f32[32,32,4,32] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
add.2 = f32[32,32,4,32] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[32,32,4,32] broadcast(const.3), dimensions={}
mul.4 = f32[32,32,4,32] multiply(add.2, bcast.3)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.4)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_TANH"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndApproxGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
Arg_5.6 = f32[32,32,64] parameter(0), sharding={replicated}
Arg_7.8 = f32[64,256] parameter(1), sharding={replicated}
dot.232 = f32[32,32,256] dot(Arg_5.6, Arg_7.8), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_6.7 = f32[256] parameter(2), sharding={replicated}
reshape.233 = f32[1,1,256] reshape(Arg_6.7)
broadcast.234 = f32[1,1,256] broadcast(reshape.233), dimensions={0,1,2}
reshape.235 = f32[256] reshape(broadcast.234)
broadcast.236 = f32[32,32,256] broadcast(reshape.235), dimensions={2}
add.237 = f32[32,32,256] add(dot.232, broadcast.236)
multiply.238 = f32[32,32,256] multiply(add.237, add.237)
multiply.239 = f32[32,32,256] multiply(add.237, multiply.238)
constant.20 = f32[] constant(0.044715)
broadcast.21 = f32[32,32,256] broadcast(constant.20), dimensions={}
multiply.240 = f32[32,32,256] multiply(multiply.239, broadcast.21)
add.241 = f32[32,32,256] add(add.237, multiply.240)
constant.18 = f32[] constant(0.797884583)
broadcast.19 = f32[32,32,256] broadcast(constant.18), dimensions={}
multiply.242 = f32[32,32,256] multiply(add.241, broadcast.19)
tanh.243 = f32[32,32,256] tanh(multiply.242)
constant.16 = f32[] constant(1)
broadcast.17 = f32[32,32,256] broadcast(constant.16), dimensions={}
add.244 = f32[32,32,256] add(tanh.243, broadcast.17)
constant.14 = f32[] constant(0.5)
broadcast.15 = f32[32,32,256] broadcast(constant.14), dimensions={}
multiply.245 = f32[32,32,256] multiply(add.244, broadcast.15)
ROOT out = f32[32,32,256] multiply(add.237, multiply.245)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
dot.7 = f32[1024,256] dot(arg0.1, arg1.2), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast.9 = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.10 = f32[1024,256] add(dot.7, broadcast.9)
constant.12 = f32[] constant(0.044715)
broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f32[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f32[1024,256] multiply(add.10, add.10)
multiply.15 = f32[1024,256] multiply(multiply.14, multiply.11)
add.16 = f32[1024,256] add(add.10, multiply.15)
constant.17 = f32[] constant(0.797884583)
broadcast.18 = f32[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f32[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f32[1024,256] tanh(multiply.19)
constant.21 = f32[] constant(1)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
add.23 = f32[1024,256] add(tanh.20, broadcast.22)
constant.24 = f32[] constant(0.5)
broadcast.25 = f32[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f32[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f32[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
convert.9 = bf16[256,512] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.13 = f32[1024,256] add(convert, broadcast)
constant.16 = f32[] constant(0.044715)
broadcast.17 = f32[1024,256] broadcast(constant.16), dimensions={}
multiply.18 = f32[1024,256] multiply(broadcast.17, add.13)
multiply.15 = f32[1024,256] multiply(add.13, add.13)
multiply.19 = f32[1024,256] multiply(multiply.18, multiply.15)
add.20 = f32[1024,256] add(add.13, multiply.19)
constant.21 = f32[] constant(0.797884583)
broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
multiply.23 = f32[1024,256] multiply(add.20, broadcast.22)
tanh.24 = f32[1024,256] tanh(multiply.23)
constant.25 = f32[] constant(1)
broadcast.26 = f32[1024,256] broadcast(constant.25), dimensions={}
add.27 = f32[1024,256] add(tanh.24, broadcast.26)
constant.1 = f32[] constant(0.5)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
multiply.30 = f32[1024,256] multiply(add.13, broadcast.2)
ROOT multiply.32 = f32[1024,256] multiply(add.27, multiply.30)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f16[1024,512] parameter(0), parameter_replication={false}
reshape.4 = f16[1024,512] reshape(arg0.1)
arg1.2 = f16[256,512] parameter(1), parameter_replication={false}
reshape.5 = f16[256,512] reshape(arg1.2)
dot.7 = f16[1024,256] dot(reshape.4, reshape.5), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
transpose.8 = f16[1024,256] transpose(dot.7), dimensions={0,1}
arg2.3 = f16[256] parameter(2), parameter_replication={false}
reshape.6 = f16[256] reshape(arg2.3)
broadcast.9 = f16[1024,256] broadcast(reshape.6), dimensions={1}
add.10 = f16[1024,256] add(transpose.8, broadcast.9)
constant.12 = f16[] constant(0.044708)
broadcast.13 = f16[1024,256] broadcast(constant.12), dimensions={}
multiply.14 = f16[1024,256] multiply(broadcast.13, add.10)
multiply.11 = f16[1024,256] multiply(add.10, add.10)
multiply.15 = f16[1024,256] multiply(multiply.14, multiply.11)
add.16 = f16[1024,256] add(add.10, multiply.15)
constant.17 = f16[] constant(0.79785)
broadcast.18 = f16[1024,256] broadcast(constant.17), dimensions={}
multiply.19 = f16[1024,256] multiply(add.16, broadcast.18)
tanh.20 = f16[1024,256] tanh(multiply.19)
constant.21 = f16[] constant(1)
broadcast.22 = f16[1024,256] broadcast(constant.21), dimensions={}
add.23 = f16[1024,256] add(tanh.20, broadcast.22)
constant.24 = f16[] constant(0.5)
broadcast.25 = f16[1024,256] broadcast(constant.24), dimensions={}
multiply.26 = f16[1024,256] multiply(add.23, broadcast.25)
ROOT multiply.27 = f16[1024,256] multiply(add.10, multiply.26)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
TEST_F(MatmulTest, ExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[] constant(0.707106769)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, bcast.0)
erf.0 = f32[32,32,4,32] erf(mul.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
add.0 = f32[32,32,4,32] add(erf.0, bcast.1)
const.2 = f32[] constant(0.5)
bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
mul.1 = f32[32,32,4,32] multiply(add.0, bcast.2)
ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.1)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["GELU_ERF"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
TEST_F(MatmulTest, BiasAndExactGELUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
dot.378 = f32[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reshape.11 = f32[32,197,3072] reshape(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.11, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
multiply.393 = f32[32,197,3072] multiply(add.390, broadcast.392)
multiply.394 = f32[32,197,3072] multiply(multiply.393, add.383)
ROOT out = f32[6304,3072] reshape(multiply.394)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactGELUTestBF16) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[6304,3072] broadcast(constant.381), dimensions={1}
add.383 = f32[6304,3072] add(convert.2, broadcast.382)
constant.384 = f32[] constant(0.707106769)
broadcast.385 = f32[6304,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[6304,3072] multiply(broadcast.385, add.383)
erf.387 = f32[6304,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[6304,3072] broadcast(constant.388), dimensions={}
add.390 = f32[6304,3072] add(erf.387, broadcast.389)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[6304,3072] broadcast(constant.391)
multiply.393 = f32[6304,3072] multiply(add.390, broadcast.392)
ROOT out = f32[6304,3072] multiply(multiply.393, add.383)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactJaxGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
reshape.0 = f32[32,197,3072] reshape(convert.2)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.0, broadcast.382)
constant.384 = f32[] constant(0.707182348)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
multiply.393 = f32[32,197,3072] multiply(add.390, add.383)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
ROOT multiply.394 = f32[32,197,3072] multiply(multiply.393, broadcast.392)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
TEST_F(MatmulTest, BiasAndExactTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[512,256] parameter(1), parameter_replication={false}
convert.9 = bf16[512,256] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32 | 2,013 |
#ifndef XLA_SERVICE_CPU_IR_EMITTER2_H_
#define XLA_SERVICE_CPU_IR_EMITTER2_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/stream_executor/launch_dim.h"
namespace xla::cpu {
class IrEmitter2 {
public:
IrEmitter2(const HloModule& hlo_module, llvm::Module* module,
IrEmitter* nested_ir_emitter);
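// Thread coordinates of a host kernel invocation; these mirror the
// SE_HOST_KernelThreadDim and SE_HOST_KernelThread structs of the host
// kernel C API.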
struct KernelThreadDims {
llvm::Value* x;
llvm::Value* y;
llvm::Value* z;
};
struct KernelThread {
llvm::Value* x;
llvm::Value* y;
llvm::Value* z;
};
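// A kernel function emitted by IrEmitter2, together with the IR values of
// its thread coordinates and the IrArrays for the flattened arguments and
// results loaded from the kernel call frame. For example, a kernel for
// `c = f32[4] add(a, b)` gets arguments {a, b} and results {c}.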
struct KernelPrototype {
llvm::Function* function;
KernelThreadDims thread_dims;
KernelThread thread;
std::vector<llvm_ir::IrArray> arguments;
std::vector<llvm_ir::IrArray> results;
};
struct KernelInfo {
std::string name;
se::BlockDim block_dims;
se::ThreadDim thread_dims;
};
absl::Span<const KernelInfo> kernels() const { return kernels_; }
absl::StatusOr<KernelInfo> EmitElementalHostKernel(
const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitFusionHostKernel(
const HloFusionInstruction* fusion);
absl::StatusOr<KernelInfo> EmitReductionHostKernel(
const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitDotHostKernel(const HloInstruction* instr);
absl::StatusOr<KernelInfo> EmitDotFusionHostKernel(
const HloFusionInstruction* fusion);
absl::StatusOr<KernelInfo> EmitSelectAndScatterHostKernel(
const HloInstruction* instr);
KernelPrototype EmitKernelPrototype(std::string_view name,
absl::Span<const Shape> arguments,
absl::Span<const Shape> results);
KernelPrototype EmitKernelPrototype(const HloInstruction* instr);
private:
class ElementalIrEmitter;
using ParallelPartitionBounds =
std::vector<std::pair<llvm::Value*, llvm::Value*>>;
struct ParallelConfig {
std::vector<int64_t> outer_dimension_partitions;
};
KernelThreadDims EmitKernelThreadDims(llvm::IRBuilder<>& b,
llvm::Value* call_frame);
KernelThread EmitKernelThread(llvm::IRBuilder<>& b, llvm::Value* call_frame);
llvm_ir::IrArray EmitKernelArgument(llvm::IRBuilder<>& b,
llvm::Value* call_frame, int64_t index,
const Shape& shape);
std::optional<ParallelConfig> GetParallelConfig(const HloInstruction* instr);
ParallelPartitionBounds EmitParallelPartitionBounds(
llvm::IRBuilder<>& b, const KernelPrototype& kernel_prototype,
const ParallelConfig& parallel_config, const Shape& shape,
std::string_view name);
absl::StatusOr<se::ThreadDim> EmitElementalLoops(
llvm::IRBuilder<>& b, const HloInstruction* instr,
const KernelPrototype& kernel_prototype,
const llvm_ir::ElementGenerator& element_generator);
bool fast_min_max() const;
const HloModule& hlo_module_;
llvm::Module* module_;
IrEmitter* nested_ir_emitter_;
llvm::StructType* call_frame_ty_;
llvm::StructType* thread_dims_ty_;
llvm::StructType* thread_ty_;
llvm::StructType* arg_ty_;
std::vector<KernelInfo> kernels_;
};
}
#endif
#include "xla/service/cpu/ir_emitter2.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/elemental_math_emitter.h"
#include "xla/service/cpu/ir_emitter.h"
#include "xla/service/cpu/parallel_loop_emitter.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::cpu {
namespace {
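// Host kernels take flattened leaf arrays: one argument per leaf shape of
// every operand and one result per leaf shape of the instruction itself.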
static std::vector<Shape> FlattenedParameters(const HloInstruction* instr) {
std::vector<Shape> parameters;
for (auto* operand : instr->operands()) {
for (auto& indexed : ShapeUtil::GetLeafShapes(operand->shape())) {
parameters.push_back(indexed.shape);
}
}
return parameters;
}
static std::vector<Shape> FlattenedResults(const HloInstruction* instr) {
std::vector<Shape> results;
for (auto& indexed : ShapeUtil::GetLeafShapes(instr->shape())) {
results.push_back(indexed.shape);
}
return results;
}
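// LLVM struct types matching the host kernel C ABI (SE_HOST_KernelCallFrame
// and friends); their layouts must stay in sync with the runtime structs.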
static llvm::StructType* Dim3StructTy(llvm::LLVMContext& ctx,
std::string_view name) {
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create(name, i64, i64, i64);
}
static llvm::StructType* KernelThreadDimTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThreadDim");
}
static llvm::StructType* KernelThreadTy(llvm::LLVMContext& ctx) {
return Dim3StructTy(ctx, "SE_HOST_KernelThread");
}
static llvm::StructType* KernelArgTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelArg", ptr, i64);
}
static llvm::StructType* KernelCallFrameTy(llvm::LLVMContext& ctx) {
auto* ptr = llvm::PointerType::getUnqual(ctx);
auto* i64 = llvm::IntegerType::getInt64Ty(ctx);
return llvm::StructType::create("SE_HOST_KernelCallFrame", ptr, ptr, i64,
ptr);
}
static llvm::FunctionType* KernelFunctionTy(llvm::LLVMContext& ctx) {
return llvm::FunctionType::get(llvm::PointerType::getUnqual(ctx),
llvm::PointerType::getUnqual(ctx),
false);
}
}
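// CPU-specific elemental emitter: lowers transcendental ops through the
// XLA:CPU math emitters and routes nested computation calls through the
// legacy IrEmitter so already-emitted computations are reused.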
class IrEmitter2::ElementalIrEmitter : public xla::ElementalIrEmitter {
public:
ElementalIrEmitter(llvm::Module* module, llvm::IRBuilder<>* b,
const HloModule* hlo_module, IrEmitter* nested_ir_emitter,
bool fast_min_max)
: xla::ElementalIrEmitter(module, b),
hlo_module_(hlo_module),
nested_ir_emitter_(nested_ir_emitter),
fast_min_max_(fast_min_max) {}
protected:
absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
llvm::Value* lhs, llvm::Value* rhs,
absl::string_view) override {
return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);
}
absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitTanh(module(), *b(), prim_type, value);
}
absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
llvm::Value* value) override {
return xla::cpu::EmitErf(module(), *b(), prim_type, value);
}
absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name, bool is_reducer) override {
if (!hlo_module_ || !hlo_module_->has_schedule()) {
return absl::InternalError(
"HLO module must be scheduled to emit thread local computation.");
}
auto emit_computation = [&](const HloComputation* computation) {
if (!nested_ir_emitter_->is_computation_emitted(*computation,
is_reducer)) {
VLOG(2) << "Emit nested computation: " << computation->name();
TF_RETURN_IF_ERROR(
nested_ir_emitter_
->EmitComputation(
const_cast<HloComputation*>(computation), name, false,
hlo_module_->schedule()
.sequence(computation)
.instructions(),
is_reducer,
{llvm::Attribute::AlwaysInline})
.status());
}
return absl::OkStatus();
};
for (HloComputation* embedded : callee.MakeEmbeddedComputationsList()) {
if (embedded->IsFusionComputation()) continue;
TF_RETURN_IF_ERROR(emit_computation(embedded));
}
TF_RETURN_IF_ERROR(emit_computation(&callee));
VLOG(2) << "Emit thread local call to: " << callee.name();
nested_ir_emitter_->b()->SetInsertPoint(b()->GetInsertPoint());
auto values = nested_ir_emitter_->EmitThreadLocalCall(
callee, parameters, name, is_reducer, false);
return values;
}
bool fast_min_max() override { return fast_min_max_; }
private:
const HloModule* hlo_module_;
IrEmitter* nested_ir_emitter_;
bool fast_min_max_;
};
IrEmitter2::IrEmitter2(const HloModule& hlo_module, llvm::Module* module,
IrEmitter* nested_ir_emitter)
: hlo_module_(hlo_module),
module_(module),
nested_ir_emitter_(nested_ir_emitter),
call_frame_ty_(KernelCallFrameTy(module_->getContext())),
thread_dims_ty_(KernelThreadDimTy(module_->getContext())),
thread_ty_(KernelThreadTy(module_->getContext())),
arg_ty_(KernelArgTy(module_->getContext())) {}
bool IrEmitter2::fast_min_max() const {
return hlo_module_.config().debug_options().xla_cpu_enable_fast_min_max();
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitElementalHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit elemental host kernel: " << instr->name();
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
const HloInstruction* operand = instr->operand(i);
operand_to_generator[operand] = [&, i](const llvm_ir::IrArray::Index& idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
};
}
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
llvm_ir::ElementGenerator element_generator =
elemental_emitter.MakeElementGenerator(instr, operand_to_generator);
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, instr, kernel_prototype, element_generator));
return kernels_.emplace_back(KernelInfo{
kernel_prototype.function->getName().str(), se::BlockDim(), thread_dims});
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit fusion host kernel: " << fusion->name();
if (fusion->fusion_kind() == HloInstruction::FusionKind::kOutput) {
return EmitDotFusionHostKernel(fusion);
}
if (fusion->fusion_kind() != HloInstruction::FusionKind::kLoop) {
return Internal("Unsupported loop fusion kind for instruction: %s",
fusion->ToString());
}
KernelPrototype kernel_prototype = EmitKernelPrototype(fusion);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
ElementalIrEmitter elemental_emitter(module_, &b, &hlo_module_,
nested_ir_emitter_, fast_min_max());
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fusion->operand_count(); i++) {
fused_emitter.BindGenerator(
*fusion->fused_parameter(i), [&, i](llvm_ir::IrArray::Index idx) {
return kernel_prototype.arguments[i].EmitReadArrayElement(idx, &b);
});
}
if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion),
nested_ir_emitter_->assignment())) {
TF_RETURN_IF_ERROR(llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
const_cast<HloFusionInstruction*>(fusion), kernel_prototype.results[0],
&fused_emitter, &b));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
TF_ASSIGN_OR_RETURN(
auto element_generator,
fused_emitter.GetGenerator(*fusion->fused_expression_root()));
TF_ASSIGN_OR_RETURN(
se::ThreadDim thread_dims,
EmitElementalLoops(b, fusion, kernel_prototype, element_generator));
return kernels_.emplace_back(KernelInfo{
kernel_prototype.function->getName().str(), se::BlockDim(), thread_dims});
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitReductionHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit reduction host kernel: " << instr->name();
return EmitElementalHostKernel(instr);
}
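// Dot implementation strategies that are emitted as LLVM IR (rather than
// dispatched to a runtime library) and so can live inside a host kernel.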
static bool IsDotCodegenStrategy(DotImplementationStrategy strategy) {
static std::array<DotImplementationStrategy, 3> kDotCodegenStrategies = {
DotImplementationStrategy::kNaiveLlvmIr,
DotImplementationStrategy::kTiledLlvmIrGemm,
DotImplementationStrategy::kTiledLlvmIrGemv,
};
return absl::c_find(kDotCodegenStrategies, strategy) !=
kDotCodegenStrategies.end();
}
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotHostKernel(
const HloInstruction* instr) {
VLOG(2) << "Emit dot host kernel: " << instr->name();
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *instr,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[0];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[1];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*instr, target_array, lhs_array, rhs_array,
nullptr, nullptr, &b,
hlo_module_.config(), nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
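// Emits a host kernel for an output fusion of the form add(dot(a, b), c) or
// add(c, dot(a, b)); the dot inputs and the addend must all be fusion
// parameters, and the addend is folded into the dot emission.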
absl::StatusOr<IrEmitter2::KernelInfo> IrEmitter2::EmitDotFusionHostKernel(
const HloFusionInstruction* fusion) {
VLOG(2) << "Emit dot fusion host kernel: " << fusion->name();
const HloInstruction* add = fusion->fused_expression_root();
if (add->opcode() != HloOpcode::kAdd) {
return Internal("Dot fusion supports only `add` root instruction");
}
bool is_dot_operand0 = add->operand(0)->opcode() == HloOpcode::kDot;
bool is_dot_operand1 = add->operand(1)->opcode() == HloOpcode::kDot;
if (is_dot_operand0 == is_dot_operand1) {
return Internal("Dot fusion root instruction must have single dot operand");
}
int64_t dot_op_index = is_dot_operand0 ? 0 : 1;
int64_t addend_op_index = 1 - dot_op_index;
const HloInstruction* dot = add->operand(dot_op_index);
DotImplementationStrategy strategy = GetDotImplementationStrategy(
hlo_module_.config(), *dot,
nested_ir_emitter_->target_machine_features());
if (!IsDotCodegenStrategy(strategy)) {
return Internal("Unsupported dot implementation strategy");
}
int64_t dot_lhs_pnum = dot->operand(0)->parameter_number();
int64_t dot_rhs_pnum = dot->operand(1)->parameter_number();
int64_t addend_pnum = add->operand(addend_op_index)->parameter_number();
KernelPrototype kernel_prototype = EmitKernelPrototype(fusion);
llvm::IRBuilder<> b(module_->getContext());
b.SetInsertPoint(kernel_prototype.function->getEntryBlock().getTerminator());
llvm_ir::IrArray lhs_array = kernel_prototype.arguments[dot_lhs_pnum];
llvm_ir::IrArray rhs_array = kernel_prototype.arguments[dot_rhs_pnum];
llvm_ir::IrArray addend_array = kernel_prototype.arguments[addend_pnum];
llvm_ir::IrArray target_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(EmitDotOperation(
*dot, target_array, lhs_array, rhs_array, &addend_array,
nullptr, &b, hlo_module_.config(),
nested_ir_emitter_->target_machine_features(),
false));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
absl::StatusOr<IrEmitter2::KernelInfo>
IrEmitter2::EmitSelectAndScatterHostKernel(const HloInstruction* instr) {
KernelPrototype kernel_prototype = EmitKernelPrototype(instr);
llvm_ir::IrArray operand_array = kernel_prototype.arguments[0];
llvm_ir::IrArray source_array = kernel_prototype.arguments[1];
llvm_ir::IrArray output_array = kernel_prototype.results[0];
TF_RETURN_IF_ERROR(nested_ir_emitter_->HandleSelectAndScatter(
const_cast<HloInstruction*>(instr), operand_array, source_array,
output_array));
return kernels_.emplace_back(
KernelInfo{kernel_prototype.function->getName().str(), se::BlockDim(),
se::ThreadDim()});
}
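// Loads the kernel thread dimensions from the call frame: field 0 of
// SE_HOST_KernelCallFrame points to an SE_HOST_KernelThreadDim struct holding
// three i64 extents.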
IrEmitter2::KernelThreadDims IrEmitter2::EmitKernelThreadDims(
llvm::IRBuilder<>& b, llvm::Value* call_frame) {
auto* td_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 0, "tdims_gep");
auto* tdims = b.CreateLoad(b.getPtrTy(), td_gep, "tdims");
auto* x_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 0, "tdim_x_gep");
auto* y_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 1, "tdim_y_gep");
auto* z_gep = b.CreateStructGEP(thread_dims_ty_, tdims, 2, "tdim_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tdim_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tdim_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tdim_z")};
}
IrEmitter2::KernelThread IrEmitter2::EmitKernelThread(llvm::IRBuilder<>& b,
llvm::Value* call_frame) {
auto* t_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 1, "tid_gep");
auto* tids = b.CreateLoad(b.getPtrTy(), t_gep, "tids");
auto* x_gep = b.CreateStructGEP(thread_ty_, tids, 0, "tid_x_gep");
auto* y_gep = b.CreateStructGEP(thread_ty_, tids, 1, "tid_y_gep");
auto* z_gep = b.CreateStructGEP(thread_ty_, tids, 2, "tid_z_gep");
return {b.CreateLoad(b.getInt64Ty(), x_gep, "tid_x"),
b.CreateLoad(b.getInt64Ty(), y_gep, "tid_y"),
b.CreateLoad(b.getInt64Ty(), z_gep, "tid_z")};
}
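// Loads a pointer to the `index`-th kernel argument from the call frame
// (field 3 holds the SE_HOST_KernelArg array) and wraps it in an IrArray of
// the given shape, tagging the load with minimum-alignment metadata.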
llvm_ir::IrArray IrEmitter2::EmitKernelArgument(llvm::IRBuilder<>& b,
llvm::Value* call_frame,
int64_t index,
const Shape& shape) {
llvm::Type* ptr = llvm::PointerType::get(b.getContext(), 0);
std::string name = absl::StrCat("arg", index);
auto* args_gep = b.CreateStructGEP(call_frame_ty_, call_frame, 3, "args_gep");
auto* args = b.CreateLoad(ptr, args_gep, "args");
auto* data_gep = b.CreateConstGEP2_32(arg_ty_, args, index, 0, name + "_gep");
auto* data = b.CreateLoad(ptr, data_gep, name);
llvm_ir::SetAlignmentMetadataForLoad(data, cpu_function_runtime::MinAlign());
return llvm_ir::IrArray(data, llvm_ir::ShapeToIrType(shape, module_), shape);
}
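// Builds the kernel entry function `ptr name(ptr call_frame)`: reads thread
// dimensions, thread ids, and the flattened argument/result arrays from the
// call frame, and returns a null pointer (no error).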
IrEmitter2::KernelPrototype IrEmitter2::EmitKernelPrototype(
std::string_view name, absl::Span<const Shape> arguments,
absl::Span<const Shape> results) {
VLOG(3) << "Emit kernel prototype: " << name
<< ", #arguments=" << arguments.size()
<< ", #results=" << results.size();
for (const Shape& argument : arguments) {
VLOG(3) << " argument: " << argument.ToString(true);
}
for (const Shape& result : results) {
VLOG(3) << " result: " << result.ToString(true);
}
llvm::LLVMContext& ctx = module_->getContext();
llvm::IRBuilder<> b(ctx);
llvm::Function* function = llvm::dyn_cast<llvm::Function>(
module_->getOrInsertFunction(name, KernelFunctionTy(ctx)).getCallee());
function->setCallingConv(llvm::CallingConv::C);
function->setDoesNotThrow();
const DebugOptions& debug_options = hlo_module_.config().debug_options();
function->addFnAttr(
"prefer-vector-width",
absl::StrCat(debug_options.xla_cpu_prefer_vector_width()));
function->addFnAttr("frame-pointer", "all");
b.SetInsertPoint(llvm::BasicBlock::Create(ctx, "", function));
llvm::Value* call_frame = function->getArg(0);
KernelThreadDims kernel_thread_dims = EmitKernelThreadDims(b, call_frame);
KernelThread kernel_thread = EmitKernelThread(b, call_frame);
int64_t idx = 0;
std::vector<llvm_ir::IrArray> ir_arguments;
for (const Shape& argument : arguments) {
ir_arguments.push_back(EmitKernelArgument(b, call_frame, idx++, argument));
}
std::vector<llvm_ir::IrArray> ir_results;
for (const Shape& result : results) {
ir_results.push_back(EmitKernelArgument(b, call_frame, idx++, result));
}
b.CreateRet(
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(ctx)));
return KernelPrototype{function, kernel_thread_dims, kernel_thread,
std::move(ir_arguments), std::move(ir_results)};
}
IrEmitter2::KernelPrototype IrEmitter2::EmitKernelPrototype(
const HloInstruction* instr) {
return EmitKernelPrototype(instr->name(), FlattenedParameters(instr),
FlattenedResults(instr));
}
std::optional<IrEmitter2::ParallelConfig> IrEmitter2::GetParallelConfig(
const HloInstruction* instr) {
auto backend_config = instr->backend_config<BackendConfig>();
if (!backend_config.ok() ||
backend_config->outer_dimension_partitions().empty()) {
return std::nullopt;
}
ParallelConfig config;
config.outer_dimension_partitions.assign(
backend_config->outer_dimension_partitions().begin(),
backend_config->outer_dimension_partitions().end());
return config;
}
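// Emits a module-level constant array with shape
// [total_partitions][num_parallel_dims][2] holding {lower, upper} bounds for
// each outer-dimension partition of `shape`, which the kernel indexes by
// thread id.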
IrEmitter2::ParallelPartitionBounds IrEmitter2::EmitParallelPartitionBounds(
llvm::IRBuilder<>& b, const KernelPrototype& kernel_prototype,
const ParallelConfig& parallel_config, const Shape& shape,
std::string_view name) {
ShapePartitionIterator it(shape, parallel_config.outer_dimension_partitions);
size_t num_parallel_dimensions =
parallel_config.outer_dimension_partitions.size();
llvm::ArrayType* dim_bounds_ty = llvm::ArrayType::get(b.getInt64Ty(), 2);
llvm::ArrayType* partition_bounds_ty =
llvm::ArrayType::get(dim_bounds_ty, num_parallel_dimensions);
llvm::ArrayType* parallel_bounds_ty =
llvm::ArrayType::get(partition_bounds_ty, it.GetTotalPartitionCount());
std::vector<llvm::Constant*> partition_bounds;
for (int64_t i = 0; i < it.GetTotalPartitionCount(); ++i) {
std::vector<llvm::Constant*> dim_counts;
for (auto [lower, size] : it.GetPartition(i)) {
dim_counts.push_back(llvm::ConstantArray::get(
dim_bounds_ty, {b.getInt64(lower), b.getInt64(lower + size)}));
}
partition_bounds.push_back(
llvm::ConstantArray::get(partition_bounds_ty, dim_counts));
}
llvm::Constant* parallel_bounds =
llvm::Constant | #include "xla/service/cpu/ir_emitter2.h"
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "llvm/IR/LLVMContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
using IrEmitter2Test = HloTestBase;
TEST_F(IrEmitter2Test, BuildKernelPrototype) {
auto hlo = std::make_unique<HloModule>("test", HloModuleConfig());
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
auto shape = ShapeUtil::MakeShape(PrimitiveType::F32, {4, 2});
std::vector<Shape> parameters = {shape};
std::vector<Shape> results = {shape};
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
IrEmitter2::KernelPrototype prototype =
ir_emitter.EmitKernelPrototype("test", parameters, results);
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: define ptr @test(ptr %0) #0 {
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThreadDim, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 0
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 1
CHECK: getelementptr inbounds %SE_HOST_KernelThread, {{.*}} i32 2
CHECK: load i64
CHECK: load i64
CHECK: load i64
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 0, i32 0
CHECK: load ptr, {{.*}} !align !0
CHECK-NEXT: getelementptr inbounds %SE_HOST_KernelCallFrame, {{.*}} i32 3
CHECK: load ptr
CHECK: getelementptr %SE_HOST_KernelArg, {{.*}} i32 1, i32 0
CHECK: load ptr, {{.*}} !align !0
CHECK: ret ptr null
CHECK: }
CHECK: !0 = !{i64 16}
)"));
}
TEST_F(IrEmitter2Test, EmitElementalKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[2,2] parameter(0)
ROOT convert = s32[2,2] convert(p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
HloInstruction* convert = FindInstruction(hlo.get(), "convert");
ASSERT_NE(convert, nullptr);
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
ir_emitter.EmitElementalHostKernel(convert));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
TEST_F(IrEmitter2Test, EmitParallelKernel) {
llvm::LLVMContext context;
auto module = std::make_unique<llvm::Module>("test", context);
const char* hlo_text = R"(
HloModule m
ENTRY main {
p0 = f32[1,2,1,16384,256] parameter(0)
ROOT convert = s32[1,2,1,16384,256] convert(p0),
backend_config={"outer_dimension_partitions":["1","2","1","4"]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));
HloInstruction* convert = FindInstruction(hlo.get(), "convert");
ASSERT_NE(convert, nullptr);
IrEmitter2 ir_emitter(*hlo, module.get(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(IrEmitter2::KernelInfo kernel,
ir_emitter.EmitElementalHostKernel(convert));
ASSERT_TRUE(*RunFileCheck(llvm_ir::DumpToString(module.get()), R"(
CHECK: @convert_parallel_bounds = private constant [8 x [4 x [2 x i64]]]
CHECK: define ptr @convert(ptr %0) #0 {
CHECK: %lo_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 0
CHECK: %up_dim_0_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 0, i32 1
CHECK: %lo_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 0
CHECK: %up_dim_1_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 1, i32 1
CHECK: %lo_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 0
CHECK: %up_dim_2_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 2, i32 1
CHECK: %lo_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 0
CHECK: %up_dim_3_gep = getelementptr{{.*}} i32 0, i64 %tid_x, i32 3, i32 1
CHECK: fptosi float {{.*}} to i32
CHECK: }
)"));
}
}
} | 2,014 |
#ifndef XLA_SERVICE_CPU_ONEDNN_LAYER_NORM_H_
#define XLA_SERVICE_CPU_ONEDNN_LAYER_NORM_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnLayerNorm(void* result, void** args);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_layer_norm.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::layer_normalization_forward;
using dnnl::memory;
using dnnl::normalization_flags;
using dnnl::prop_kind;
using dnnl::stream;
}
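// Runtime entry point for the oneDNN layer-normalization custom call. `args`
// is unpacked starting at index 1: ExecutableRunOptions, the serialized
// OneDnnNormConfig, then memrefs for the input, gamma (scale) and beta
// (shift); `result` is the output memref.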
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnLayerNorm(
void* result, void** args) {
int arg_indx = 1;
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
engine cpu_engine(engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnNormConfig ln_config;
ln_config.ParseFromString(config_str);
MemrefInfo layer_minfo(args[arg_indx++]);
MemrefInfo gamma_minfo(args[arg_indx++]);
MemrefInfo beta_minfo(args[arg_indx++]);
MemrefInfo result_minfo(result);
auto src_md = layer_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto scaleshift_md = beta_minfo.GetOneDnnMemDesc();
auto src_mem = memory(src_md, cpu_engine, layer_minfo.Data());
auto dst_mem = memory(dst_md, cpu_engine, result_minfo.Data());
auto scale_mem = memory(scaleshift_md, cpu_engine, gamma_minfo.Data());
auto shift_mem = memory(scaleshift_md, cpu_engine, beta_minfo.Data());
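// The config carries epsilon as its raw IEEE-754 bit pattern in an int32
// field; reinterpret those bits back into a float.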
float epsilon;
*(reinterpret_cast<int32_t*>(&epsilon)) = ln_config.epsilon_typecast();
auto lnorm_pd = layer_normalization_forward::primitive_desc(
cpu_engine, prop_kind::forward_inference, src_md, dst_md, epsilon,
normalization_flags::use_scale | normalization_flags::use_shift);
auto lnorm_prim = layer_normalization_forward(lnorm_pd);
std::unordered_map<int, memory> ln_args;
ln_args.insert({DNNL_ARG_SRC, src_mem});
ln_args.insert({DNNL_ARG_SCALE, scale_mem});
ln_args.insert({DNNL_ARG_SHIFT, shift_mem});
ln_args.insert({DNNL_ARG_DST, dst_mem});
lnorm_prim.execute(onednn_stream, ln_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class LayerNormTest : public HloTestBase {
protected:
const char* onednn_layer_norm_ =
R"(
; CHECK: custom_call_target="__onednn$layernorm",
; CHECK: backend_config={
; CHECK-DAG: "onednn_layer_norm_config":{
; CHECK-DAG: "rescale":"SCALE_AND_SHIFT"
; CHECK-DAG: }
; CHECK: }
)";
std::string common_hlo_region_ =
R"(
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
)";
std::string common_hlo_entry_computation_block_ =
R"(
Arg_0.2 = f32[768]{0} parameter(1), sharding={replicated}
Arg_0.3 = f32[768]{0} parameter(2), sharding={replicated}
convert.290 = f32[84,197,768]{2,1,0} convert(Arg_0.1)
constant.291 = f32[] constant(0)
convert.292 = f32[] convert(constant.291)
reduce.297 = f32[84,197]{1,0} reduce(convert.290, convert.292), dimensions={2}, to_apply=region_add
constant.298 = s32[] constant(768)
convert.299 = f32[] convert(constant.298)
broadcast.300 = f32[84,197]{1,0} broadcast(convert.299), dimensions={}
divide.301 = f32[84,197]{1,0} divide(reduce.297, broadcast.300)
convert.302 = f32[84,197]{1,0} convert(divide.301)
reshape.303 = f32[84,197,1]{2,1,0} reshape(convert.302)
reshape.304 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.305 = f32[84,197,768]{2,1,0} broadcast(reshape.304), dimensions={0,1}
subtract.306 = f32[84,197,768]{2,1,0} subtract(Arg_0.1, broadcast.305)
multiply.307 = f32[84,197,768]{2,1,0} multiply(subtract.306, subtract.306)
convert.308 = f32[84,197,768]{2,1,0} convert(multiply.307)
constant.309 = f32[] constant(0)
convert.310 = f32[] convert(constant.309)
reduce.315 = f32[84,197]{1,0} reduce(convert.308, convert.310), dimensions={2}, to_apply=region_add
constant.316 = s32[] constant(768)
convert.317 = f32[] convert(constant.316)
broadcast.318 = f32[84,197]{1,0} broadcast(convert.317), dimensions={}
divide.319 = f32[84,197]{1,0} divide(reduce.315, broadcast.318)
convert.320 = f32[84,197]{1,0} convert(divide.319)
reshape.321 = f32[84,197,1]{2,1,0} reshape(convert.320)
constant.322 = f32[] constant(1e-12)
broadcast.323 = f32[84,197,1]{2,1,0} broadcast(constant.322), dimensions={}
add.324 = f32[84,197,1]{2,1,0} add(reshape.321, broadcast.323)
rsqrt.325 = f32[84,197,1]{2,1,0} rsqrt(add.324)
reshape.328 = f32[84,197]{1,0} reshape(rsqrt.325)
broadcast.329 = f32[84,197,768]{2,1,0} broadcast(reshape.328), dimensions={0,1}
broadcast.327 = f32[84,197,768]{2,1,0} broadcast(Arg_0.2), dimensions={2}
multiply.330 = f32[84,197,768]{2,1,0} multiply(broadcast.329, broadcast.327)
multiply.331 = f32[84,197,768]{2,1,0} multiply(Arg_0.1, multiply.330)
broadcast.336 = f32[84,197,768]{2,1,0} broadcast(Arg_0.3), dimensions={2}
reshape.332 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.333 = f32[84,197,768]{2,1,0} broadcast(reshape.332), dimensions={0,1}
multiply.334 = f32[84,197,768]{2,1,0} multiply(multiply.330, broadcast.333)
subtract.337 = f32[84,197,768]{2,1,0} subtract(broadcast.336, multiply.334)
)";
};
TEST_F(LayerNormTest, LayerNormTest0_FP32) {
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f32[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f32[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1 = f32[84,197,768]{2,1,0} parameter(0), sharding={replicated}
)" + common_hlo_entry_computation_block_ +
R"(
ROOT add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(bf16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->bf16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = bf16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = bf16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest0_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
std::string layer_norm_module_str =
R"(HloModule layer_norm.test, entry_computation_layout={(f16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f16[84,197,768]{2,1,0}})" +
common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = f16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = f16[84,197,768]{2,1,0} convert(add.338)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(Arg_2, broadcast_1)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_2 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_2, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(Arg_2, multiply_1)
constant_5 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest2_F16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(broadcast_1, Arg_2)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_1 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_1, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(multiply_1, Arg_2)
constant_5 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
TEST_F(LayerNormTest, LayerNormTest1_BF16) {
if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
ENTRY main {
Arg_0.1 = bf16[160,197,768] parameter(0), sharding={replicated}
Arg_0.2 = bf16[768] parameter(1), sharding={replicated}
Arg_0.3 = bf16[768] parameter(2), sharding={replicated}
convert.80 = f32[160,197,768] convert(Arg_0.1)
constant.81 = f32[] constant(0)
convert.82 = f32[] convert(constant.81)
reduce.87 = f32[160,197] reduce(convert.80, convert.82), dimensions={2}, to_apply=region_add
constant.88 = s32[] constant(768)
convert.89 = f32[] convert(constant.88)
broadcast.90 = f32[160,197] broadcast(convert.89), dimensions={}
divide.91 = f32[160,197] divide(reduce.87, broadcast.90)
convert.92 = bf16[160,197] convert(divide.91)
reshape.93 = bf16[160,197,1] reshape(convert.92)
reshape.94 = bf16[160,197] reshape(reshape.93)
broadcast.95 = bf16[160,197,768] broadcast(reshape.94), dimensions={0,1}
subtract.96 = bf16[160,197,768] subtract(Arg_0.1, broadcast.95)
multiply.97 = bf16[160,197,768] multiply(subtract.96, subtract.96)
convert.98 = f32[160,197,768] convert(multiply.97)
constant.99 = f32[] constant(0)
convert.100 = f32[] convert(constant.99)
reduce.105 = f32[160,197] reduce(convert.98, convert.100), dimensions={2}, to_apply=region_add
constant.106 = s32[] constant(768)
convert.107 = f32[] convert(constant.106)
broadcast.108 = f32[160,197] broadcast(convert.107), dimensions={}
divide.109 = f32[160,197] divide(reduce.105, broadcast.108)
convert.110 = bf16[160,197] convert(divide.109)
reshape.111 = bf16[160,197,1] reshape(convert.110)
constant.112 = bf16[] constant(1.002e-12)
broadcast.113 = bf16[160,197,1] broadcast(constant.112), dimensions={}
add.114 = bf16[160,197,1] add(reshape.111, broadcast.113)
rsqrt.115 = bf16[160,197,1] rsqrt(add.114)
reshape.118 = bf16[160,197] reshape(rsqrt.115)
broadcast.119 = bf16[160,197,768] broadcast(reshape.118), dimensions={0,1}
broadcast.117 = bf16[160,197,768] broadcast(Arg_0.2), dimensions={2}
multiply.120 = bf16[160,197,768] multiply(broadcast.119, broadcast.117)
multiply.121 = bf16[160,197,768] multiply(Arg_0.1, multiply.120)
broadcast.126 = bf16[160,197,768] broadcast(Arg_0.3), dimensions={2}
reshape.122 = bf16[160,197] reshape(reshape.93)
broadcast.123 = bf16[160,197,768] broadcast(reshape.122), dimensions={0,1}
multiply.124 = bf16[160,197,768] multiply(multiply.120, broadcast.123)
subtract.127 = bf16[160,197,768] subtract(broadcast.126, multiply.124)
ROOT add.128 = bf16[160,197,768] add(multiply.121, subtract.127)
}
)";
EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
}
}
#endif | 2,015 |
#ifndef XLA_SERVICE_GPU_IR_EMISSION_UTILS_H_
#define XLA_SERVICE_GPU_IR_EMISSION_UTILS_H_
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
inline constexpr int64_t kMinDimensionToTransposeTiled = 16;
inline constexpr int64_t kMinDimensionToTransposeTiled2 = 8;
inline constexpr int64_t kMinTotalDimensionsToTransposeTiled = 64 * 128;
bool IsMatrixMultiplication(const HloInstruction& dot);
bool IsMatrixVectorMultiplication(const HloInstruction& dot);
inline constexpr int64_t WarpSize() { return 32; }
inline constexpr absl::string_view kCustomFusionKind = "__custom_fusion";
inline constexpr absl::string_view kTritonFusionKind = "__triton";
inline constexpr absl::string_view kTritonGemmFusionKind = "__triton_gemm";
inline constexpr absl::string_view kCuDnnFusionKind = "__cudnn$fusion";
inline constexpr absl::string_view kUncompilableFusion =
"__uncompilable_fusion";
inline constexpr absl::string_view kTopKCustomCallTarget = "__gpu$TopK";
bool IsCustomCallToCusolver(const HloInstruction& hlo);
bool IsCustomCallToTopK(const HloInstruction& hlo);
extern const char* const kCusolverCholeskyCallTarget;
bool IsSliceWithUnitStrides(const HloInstruction* instr);
bool IsContiguousSlice(const HloInstruction& instr);
bool IsContiguousSlice(const Shape& orig, const Shape& sliced);
llvm::Value* EmitFullWarpShuffleDown(
llvm::Value* value, llvm::Value* offset, llvm::IRBuilder<>* builder,
const se::DeviceDescription& gpu_device_info);
llvm::Value* IsBlock0Thread0(llvm::IRBuilder<>* b);
absl::StatusOr<BufferAllocation::Slice> GetAllocationSlice(
const BufferAssignment& buffer_assignment, const HloInstruction* instr,
const ShapeIndex& index);
absl::StatusOr<bool> CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
const HloFusionInstruction* fusion,
std::function<absl::StatusOr<BufferAllocation::Slice>(
const HloInstruction* instr, const ShapeIndex& index)>
get_allocation_slice,
absl::Span<HloInstructionAdaptor const> roots);
std::vector<const HloInstruction*> GetOutputDefiningDynamicUpdateSlices(
absl::Span<HloInstructionAdaptor const> roots);
HloInstructionAdaptor FindNonTrivialHero(const HloInstructionAdaptor& instr);
const HloInstruction& FindNonTrivialHero(const HloInstruction& instr);
struct TransposeDescription {
const HloInstruction* instr;
Vector3 dimensions;
Vector3 permutation;
TransposeDescription(Vector3 dimensions, Vector3 permutation)
: TransposeDescription(nullptr, dimensions, permutation) {}
TransposeDescription(const HloInstruction* instr, Vector3 dimensions,
Vector3 permutation)
: instr(instr), dimensions(dimensions), permutation(permutation) {}
const Shape& input_shape() const { return instr->operand(0)->shape(); }
bool IsEquivalent(const TransposeDescription& other) const {
return dimensions == other.dimensions && permutation == other.permutation;
}
};
std::optional<TransposeDescription> GetDescriptionForTiledTransposeEmitter(
const HloInstruction& root, const HloInstruction& hero);
bool IsIntermediate(const HloInstruction* instr, int allowed_operand_count = 1);
void VLogModule(int level, const llvm::Module& module);
void VerifyModule(const llvm::Module& module);
llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo,
int64_t launch_size, llvm::IRBuilder<>* b);
bool IsAMDGPU(const llvm::Module* module);
bool IsSPIR(const llvm::Module* module);
class DenseDataIntermediate {
public:
static DenseDataIntermediate Own(std::vector<uint8_t> owned) {
DenseDataIntermediate di;
di.data_ = std::move(owned);
return di;
}
static DenseDataIntermediate Alias(absl::Span<const uint8_t> aliased) {
DenseDataIntermediate di;
di.data_ = aliased;
return di;
}
absl::Span<const uint8_t> span() const {
return data_.index() == 0 ? absl::Span<const uint8_t>(std::get<0>(data_))
: std::get<1>(data_);
}
private:
std::variant<std::vector<uint8_t>, absl::Span<const uint8_t>> data_;
};
absl::StatusOr<DenseDataIntermediate> LiteralToXlaFormat(
const Literal& literal);
}
}
#endif
#include "xla/service/gpu/ir_emission_utils.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsRank2(const Shape& shape, int64_t batch_dimensions_size) {
return shape.rank() == batch_dimensions_size + 2;
}
bool IsRank1(const Shape& shape, int64_t batch_dimensions_size) {
return shape.rank() == batch_dimensions_size + 1;
}
}
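// Returns true if `dot` is a plain (possibly batched) matrix-matrix
// multiplication: an allowed output element type, rank batch+2 operands and
// result, and no zero-element inputs.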
bool IsMatrixMultiplication(const HloInstruction& dot) {
if (dot.opcode() != HloOpcode::kDot) {
return false;
}
const Shape& lhs_shape = dot.operand(0)->shape();
const Shape& rhs_shape = dot.operand(1)->shape();
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
PrimitiveType output_primitive_type = dot.shape().element_type();
bool type_is_allowed =
(output_primitive_type == F8E4M3FN || output_primitive_type == F8E5M2 ||
output_primitive_type == F8E4M3FNUZ ||
output_primitive_type == F8E5M2FNUZ || output_primitive_type == F16 ||
output_primitive_type == BF16 || output_primitive_type == F32 ||
output_primitive_type == F64 || output_primitive_type == C64 ||
output_primitive_type == C128) ||
(output_primitive_type == S32 && lhs_shape.element_type() == S8 &&
rhs_shape.element_type() == S8);
bool shapes_are_valid =
type_is_allowed &&
IsRank2(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(rhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(dot.shape(), dim_numbers.lhs_batch_dimensions_size()) &&
!ShapeUtil::IsZeroElementArray(lhs_shape) &&
!ShapeUtil::IsZeroElementArray(rhs_shape);
return shapes_are_valid;
}
bool IsMatrixVectorMultiplication(const HloInstruction& dot) {
if (dot.opcode() != HloOpcode::kDot) {
return false;
}
const Shape& lhs_shape = dot.operand(0)->shape();
const Shape& rhs_shape = dot.operand(1)->shape();
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
PrimitiveType output_primitive_type = dot.shape().element_type();
bool type_is_allowed =
(output_primitive_type == F8E4M3FN || output_primitive_type == F8E5M2 ||
output_primitive_type == F16 || output_primitive_type == BF16 ||
output_primitive_type == F32 || output_primitive_type == F64 ||
output_primitive_type == C64 || output_primitive_type == C128) ||
(output_primitive_type == S32 && lhs_shape.element_type() == S8 &&
rhs_shape.element_type() == S8);
bool shapes_are_valid =
type_is_allowed &&
((IsRank2(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank1(rhs_shape, dim_numbers.lhs_batch_dimensions_size())) ||
(IsRank1(lhs_shape, dim_numbers.lhs_batch_dimensions_size()) &&
IsRank2(rhs_shape, dim_numbers.lhs_batch_dimensions_size()))) &&
IsRank1(dot.shape(), dim_numbers.lhs_batch_dimensions_size()) &&
!ShapeUtil::IsZeroElementArray(lhs_shape) &&
!ShapeUtil::IsZeroElementArray(rhs_shape);
return shapes_are_valid;
}
const char* const kCusolverCholeskyCallTarget = "__cusolver$cholesky";
bool IsCustomCallToCusolver(const HloInstruction& hlo) {
if (hlo.opcode() != HloOpcode::kCustomCall) {
return false;
}
return hlo.custom_call_target() == kCusolverCholeskyCallTarget;
}
bool IsCustomCallToTopK(const HloInstruction& hlo) {
return hlo.opcode() == HloOpcode::kCustomCall &&
hlo.custom_call_target() == kTopKCustomCallTarget;
}
bool IsSliceWithUnitStrides(const HloInstruction* instr) {
auto slice = DynCast<HloSliceInstruction>(instr);
return slice && absl::c_all_of(slice->slice_strides(),
[](int64_t stride) { return stride == 1; });
}
bool IsContiguousSlice(const HloInstruction& instr) {
auto slice = DynCast<HloSliceInstruction>(&instr);
if (!slice) return false;
const Shape& src_shape = slice->operand(0)->shape();
const Shape& dst_shape = slice->shape();
return IsContiguousSlice(src_shape, dst_shape);
}
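// A slice is contiguous if, scanning the source layout from minor to major,
// every dimension after the first sliced one has size 1 in the result.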
bool IsContiguousSlice(const Shape& orig, const Shape& sliced) {
bool sliced_dim_found = false;
for (auto dim : orig.layout().minor_to_major()) {
if (!sliced_dim_found) {
sliced_dim_found = sliced.dimensions(dim) < orig.dimensions(dim);
continue;
}
if (sliced.dimensions(dim) != 1) return false;
}
return true;
}
llvm::Value* EmitAMDGPUShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
auto* i32_ty = b->getInt32Ty();
llvm::FunctionCallee shfl_fn = module->getOrInsertFunction(
llvm_ir::AsStringRef("__ockl_readuplane_i32"),
llvm::FunctionType::get(i32_ty, {i32_ty, i32_ty},
false));
llvm::Value* result =
b->CreateCall(shfl_fn, {b->CreateBitCast(value, i32_ty), offset});
return b->CreateBitCast(result, value->getType());
}
llvm::Value* EmitAMDGPUShflDownSwizzle(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
auto* i32_ty = b->getInt32Ty();
llvm::Function* intrinsic = llvm::cast<llvm::Function>(
module
->getOrInsertFunction(
"llvm.amdgcn.ds.swizzle",
llvm::FunctionType::get(i32_ty, {i32_ty, i32_ty},
false))
.getCallee());
llvm::Value* bitcast_value = b->CreateBitCast(value, i32_ty);
llvm::Value* control_value =
b->CreateAdd(b->CreateMul(offset, b->getInt32(0x20)), b->getInt32(0x1f));
llvm::Value* result =
b->CreateCall(intrinsic, {bitcast_value, control_value});
return b->CreateBitCast(result, value->getType());
}
llvm::Value* EmitNVPTXShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Intrinsic::ID llvm_intrinsic_id;
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
if (value->getType()->isFloatTy()) {
llvm_intrinsic_id = llvm::Intrinsic::nvvm_shfl_sync_down_f32;
} else {
llvm_intrinsic_id = llvm::Intrinsic::nvvm_shfl_sync_down_i32;
}
llvm::Function* intrinsic =
llvm::Intrinsic::getDeclaration(module, llvm_intrinsic_id, {});
return b->CreateCall(
intrinsic, {b->getInt32(-1), value, offset, b->getInt32(WarpSize() - 1)});
}
llvm::Value* EmitSPIRShflDown(llvm::Value* value, llvm::Value* offset,
llvm::IRBuilder<>* b) {
CHECK_EQ(value->getType()->getPrimitiveSizeInBits(), 32);
if (value->getType()->isFloatTy()) {
return EmitDeviceFunctionCall(
"_Z34__spirv_GroupNonUniformShuffleDownffj",
{b->getInt32(3), value, offset}, {U32, F32, U32}, F32,
llvm::AttrBuilder(b->getContext())
.addAttribute(llvm::Attribute::NoUnwind)
.addAttribute(llvm::Attribute::Convergent),
b);
} else {
return EmitDeviceFunctionCall(
"_Z34__spirv_GroupNonUniformShuffleDownjjj",
{b->getInt32(3), value, offset}, {U32, U32, U32}, U32,
llvm::AttrBuilder(b->getContext())
.addAttribute(llvm::Attribute::NoUnwind)
.addAttribute(llvm::Attribute::Convergent),
b);
}
}
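// Emits a warp-wide shuffle-down. 32-bit floats use a single target-specific
// shuffle; all other types are bitcast into 32-bit segments that are shuffled
// independently and reassembled.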
llvm::Value* EmitFullWarpShuffleDown(
llvm::Value* value, llvm::Value* offset, llvm::IRBuilder<>* builder,
const se::DeviceDescription& gpu_device_info) {
int bit_width = value->getType()->getPrimitiveSizeInBits();
llvm::Module* module = builder->GetInsertBlock()->getModule();
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
if (value->getType()->isFloatTy() && bit_width == 32) {
if (target_triple.isNVPTX()) {
return EmitNVPTXShflDown(value, offset, builder);
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (gpu_device_info.rocm_compute_capability().gfx9_mi100_or_later()) {
return EmitAMDGPUShflDownSwizzle(value, offset, builder);
}
return EmitAMDGPUShflDown(value, offset, builder);
} else if (target_triple.isSPIR()) {
return EmitSPIRShflDown(value, offset, builder);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
int num_segments = CeilOfRatio(bit_width, 32);
llvm::Value* x = builder->CreateBitCast(
builder->CreateZExt(
builder->CreateBitCast(value, builder->getIntNTy(bit_width)),
builder->getIntNTy(32 * num_segments)),
llvm::VectorType::get(builder->getInt32Ty(), num_segments, false));
for (int i = 0; i < num_segments; ++i) {
llvm::Value* insert_val;
if (target_triple.isNVPTX()) {
insert_val = EmitNVPTXShflDown(builder->CreateExtractElement(x, i),
offset, builder);
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (gpu_device_info.rocm_compute_capability().gfx9_mi100_or_later()) {
insert_val = EmitAMDGPUShflDownSwizzle(
builder->CreateExtractElement(x, i), offset, builder);
} else {
insert_val = EmitAMDGPUShflDown(builder->CreateExtractElement(x, i),
offset, builder);
}
} else if (target_triple.isSPIR()) {
insert_val = EmitSPIRShflDown(builder->CreateExtractElement(x, i), offset,
builder);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
x = builder->CreateInsertElement(x, insert_val, i);
}
return builder->CreateBitCast(
builder->CreateTrunc(
builder->CreateBitCast(x, builder->getIntNTy(32 * num_segments)),
builder->getIntNTy(bit_width)),
value->getType());
}
llvm::Value* IsBlock0Thread0(llvm::IRBuilder<>* b) {
llvm::Value* is_thread0 = b->CreateICmpEQ(
b->getInt32(0),
EmitCallToTargetIntrinsic(TargetIntrinsicID::kThreadIdx, {}, {}, b));
llvm::Value* is_block0 = b->CreateICmpEQ(
b->getInt32(0),
EmitCallToTargetIntrinsic(TargetIntrinsicID::kBlockIdx, {}, {}, b));
return b->CreateAnd(is_thread0, is_block0);
}
absl::StatusOr<BufferAllocation::Slice> GetAllocationSlice(
const BufferAssignment& buffer_assignment, const HloInstruction* instr,
const ShapeIndex& index) {
return buffer_assignment.GetUniqueSlice(instr, index);
}
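// Walks each fusion root through bitcasts and collects the roots defined by a
// dynamic-update-slice.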
std::vector<const HloInstruction*> GetOutputDefiningDynamicUpdateSlices(
absl::Span<HloInstructionAdaptor const> roots) {
std::vector<const HloInstruction*> dus_ops;
for (HloInstructionAdaptor root : roots) {
while (root.opcode() == HloOpcode::kBitcast) {
root = root.GetOperand(0);
}
if (root.opcode() == HloOpcode::kDynamicUpdateSlice) {
dus_ops.push_back(&root.instruction());
}
}
return dus_ops;
}
template <typename T>
absl::InlinedVector<const HloInstruction*, 4> GetStartIndices(T instr) {
absl::InlinedVector<const HloInstruction*, 4> result;
for (int i = instr->first_index_operand_number(); i < instr->operand_count();
i++) {
const HloInstruction* index = instr->operand(i);
result.push_back(index);
}
return result;
}
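// Returns true if every output of the fusion is a dynamic-update-slice that
// can be emitted in place: the updated operand must be a fusion parameter
// whose buffer aliases the corresponding fusion output, and all other users
// of that parameter must be benign (dynamic-slices at the same indices,
// elementwise ops, bitcasts, or the root tuple).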
absl::StatusOr<bool> CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
const HloFusionInstruction* fusion,
std::function<absl::StatusOr<BufferAllocation::Slice>(
const HloInstruction* instr, const ShapeIndex& index)>
get_allocation_slice,
absl::Span<HloInstructionAdaptor const> roots) {
std::vector<const HloInstruction*> dus_instrs =
GetOutputDefiningDynamicUpdateSlices(roots);
std::vector<BufferAllocation::Slice> output_buffers;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
fusion->shape(), [&](const Shape& shape, const ShapeIndex index) {
if (shape.IsArray()) {
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,
get_allocation_slice(fusion, index));
output_buffers.push_back(buffer);
}
return absl::OkStatus();
}));
if (dus_instrs.size() != output_buffers.size()) {
return false;
}
if (output_buffers.empty()) {
return Internal("Output buffers should not be empty");
}
Shape update_shape = dus_instrs[0]->operand(1)->shape();
for (int i = 0; i < dus_instrs.size(); ++i) {
auto* dus = Cast<HloDynamicUpdateSliceInstruction>(dus_instrs[i]);
if (!dus->IsRoot() && dus->user_count() != 1) return false;
HloInstruction* dus_user = dus->IsRoot() ? nullptr : dus->users().front();
if (dus_user && dus_user->opcode() == HloOpcode::kBitcast) {
if (!dus_user->IsRoot() && dus_user->user_count() != 1) return false;
dus_user = dus_user->IsRoot() ? nullptr : dus_user->users().front();
}
if (dus_user && dus_user->opcode() == HloOpcode::kTuple) {
if (!dus_user->IsRoot()) return false;
dus_user = nullptr;
}
if (dus_user != nullptr) return false;
const HloInstruction* operand = dus->operand(0);
if (operand->opcode() == HloOpcode::kBitcast) {
operand = operand->operand(0);
}
auto* parameter = DynCast<HloParameterInstruction>(operand);
if (!parameter) return false;
std::queue<const HloInstruction*> q;
absl::flat_hash_set<const HloInstruction*> visited;
q.push(parameter);
visited.insert(parameter);
visited.insert(dus);
while (!q.empty()) {
const HloInstruction* instr = q.front();
q.pop();
for (const HloInstruction* user : instr->users()) {
if (user->opcode() == HloOpcode::kDynamicSlice &&
dus->operand(0) == user->operand(0) &&
update_shape == user->shape()) {
absl::InlinedVector<const HloInstruction*, 4> user_start_indices =
GetStartIndices(Cast<HloDynamicSliceInstruction>(user));
absl::InlinedVector<const HloInstruction*, 4> dus_start_indices =
GetStartIndices(dus);
if (ShapeUtil::ElementsIn(update_shape) != 1 &&
user_start_indices != dus_start_indices) {
return false;
}
} else if (user != dus && !user->IsElementwise() &&
user->opcode() != HloOpcode::kBitcast &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
if (visited.insert(user).second) {
q.push(user);
}
}
}
if (dus->update()->shape() != update_shape) {
return false;
}
const HloInstruction* lhs = fusion->operand(parameter->parameter_number());
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice lhs_buffer,
get_allocation_slice(lhs, {}));
BufferAllocation::Slice rhs_buffer = output_buffers[i];
if (lhs_buffer != rhs_buffer) {
return false;
}
}
return true;
}
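// Matches a layout-changing copy that qualifies for the tiled transpose
// emitter: its normalized transpose must be a {0,2,1} or {2,1,0} permutation
// with transposed dimensions large enough to be worth tiling.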
static std::optional<TransposeDescription> FindTiledTranspose(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kCopy) {
return std::nullopt;
}
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedTransposeShape(
instr.operand(0)->shape(), instr.shape(), Vector3{0, 2, 1})) {
if ((tr->at(1) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) ||
(tr->at(1) >= kMinDimensionToTransposeTiled2 &&
tr->at(2) >= kMinDimensionToTransposeTiled2 &&
tr->at(1) * tr->at(2) >= kMinTotalDimensionsToTransposeTiled)) {
return TransposeDescription{&instr, *tr,
Vector3{0, 2, 1}};
}
}
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedTransposeShape(
instr.operand(0)->shape(), instr.shape(), Vector3{2, 1, 0})) {
if ((tr->at(0) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) ||
(tr->at(0) >= kMinDimensionToTransposeTiled2 &&
tr->at(2) >= kMinDimensionToTransposeTiled2 &&
tr->at(0) * tr->at(2) >= kMinTotalDimensionsToTransposeTiled)) {
return TransposeDescription{&instr, *tr,
Vector3{2, 1, 0}};
}
}
return std::nullopt;
}
static std::optional<TransposeDescription> FindTiledLogicalTranspose(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kTranspose) {
return std::nullopt;
}
if (std::optional<Vector3> tr = ShapeUtil::GetNormalizedLogicalTransposeShape(
instr.operand(0)->shape(), instr.shape(), instr.dimensions(),
Vector3{0, 2, 1})) {
if ((tr->at(1) >= kMinDimensionToTransposeTiled &&
tr->at(2) >= kMinDimensionToTransposeTiled) || | #include "xla/service/gpu/ir_emission_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
using ::tsl::testing::IsOkAndHolds;
class IrEmissionUtilsTest : public HloTestBase {};
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTranspose) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
ROOT t = f32[64,32,48]{2,1,0} transpose(p), dimensions={2,0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*tr, *tr);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({1, 64, 1536}));
EXPECT_EQ(result->permutation, Vector3({0, 2, 1}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTranspose) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
ROOT t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r);
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateUnaryOp) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT n = f32[64,48,32]{2,1,0} negate(t)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateUnaryOpS8) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT c = s8[64,48,32]{2,1,0} convert(t)
}
ENTRY main {
p0 = f32[32,48,64]{2,1,0} parameter(0)
ROOT f = s8[64,48,32]{2,1,0} fusion(p0), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0)).has_value());
EXPECT_EQ(FindNonTrivialHero(*r).name(), "t");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusion) {
const char* hlo = R"(
HloModule module
%add {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
%fused_computation (param_0.4: f32[128,64], param_1.4: bf16[]) -> bf16[64] {
%param_0 = f32[128,64]{1,0} parameter(0)
%param_1 = bf16[] parameter(1)
%convert.0 = f32[] convert(bf16[] %param_1)
%reduce.0 = f32[64]{0} reduce(f32[128,64]{1,0} %param_0, f32[] %convert.0), dimensions={0}, to_apply=%add
ROOT %convert.1 = bf16[64]{0} convert(f32[64]{0} %reduce.0)
}
ENTRY %main {
%param_0 = f32[128,64]{1,0} parameter(0)
%param_1 = bf16[] parameter(1)
ROOT fusion = bf16[64]{0} fusion(%param_0, %param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[0]);
EXPECT_EQ(result.name(), "reduce.0");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusionTwoRootUsers) {
const char* hlo = R"(
HloModule module
Add {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
fused_computation {
param_0 = f32[4,2]{1,0} parameter(0)
neg = f32[4,2]{1,0} negate(param_0)
constant_0 = f32[] constant(0)
reduce.1 = f32[4]{0} reduce(param_0, constant_0), dimensions={1}, to_apply=Add
bitcast.1 = f32[1,1,4]{2,1,0} bitcast(reduce.1)
sign.1 = f32[1,1,4]{2,1,0} sign(bitcast.1)
ROOT tuple.12 = (f32[4,2]{1,0}, f32[1,1,4]{2,1,0}, f32[1,1,4]{2,1,0}) tuple(neg, bitcast.1, sign.1)
}
ENTRY main.7749 {
Arg_2.1 = f32[4,2]{1,0} parameter(0)
ROOT fusion = (f32[4,2]{1,0}, f32[1,1,4]{2,1,0}, f32[1,1,4]{2,1,0}) fusion(Arg_2.1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[1]);
EXPECT_EQ(result.name(), "reduce.1");
const auto& result2 = FindNonTrivialHero(fusion->GetRoots()[2]);
EXPECT_EQ(result2.name(), "reduce.1");
}
TEST_F(IrEmissionUtilsTest, FindReduceHeroEpilogueFusionHeroAlsoUsedAsNonHero) {
const char* hlo = R"(
HloModule module
Add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
fused_computation {
p0 = f32[4]{0} parameter(0)
zero = f32[] constant(0.0)
reduce.0 = f32[] reduce(f32[4]{0} p0, f32[] zero), dimensions={0}, to_apply=Add
broadcast = f32[4]{0} broadcast(f32[] reduce.0), dimensions={}
reduce.1 = f32[] reduce(f32[4]{0} broadcast, f32[] zero), dimensions={0}, to_apply=Add
bitcast = f32[1]{0} bitcast(f32[] reduce.0)
ROOT tuple.1 = (f32[], f32[4]{0}, f32[1]{0}) tuple(f32[] reduce.1, f32[4]{0} broadcast, f32[1]{0} bitcast)
}
ENTRY main {
Arg0 = f32[4]{0} parameter(0)
ROOT fusion = (f32[], f32[4]{0}, f32[1]{0}) fusion(Arg0), kind=kInput, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto fusion = HloFusionAdaptor::ForInstruction(r);
const auto& result = FindNonTrivialHero(fusion->GetRoots()[1]);
EXPECT_EQ(result.name(), "broadcast");
const auto& result2 = FindNonTrivialHero(fusion->GetRoots()[2]);
EXPECT_EQ(result2.name(), "reduce.0");
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithIntermediateBinaryOp) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[64,48,32]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
ROOT add = f32[64,48,32]{2,1,0} add(t, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
auto result = GetDescriptionForTiledTransposeEmitter(*r, *r->operand(0));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindAnyTiledTransposeWithTwoIntermediateBinaryOps) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[64,48,32]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
mul = f32[64,48,32]{2,1,0} multiply(t, p2)
ROOT add = f32[64,48,32]{2,1,0} add(mul, p2)
}
ENTRY main {
param0 = f32[32,48,64]{2,1,0} parameter(0)
param1 = f32[64,48,32]{2,1,0} parameter(1)
ROOT fusion = f32[64,48,32]{2,1,0} fusion(param0, param1), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, r->operand(0)->operand(0));
EXPECT_EQ(result->dimensions, Vector3({64, 48, 32}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest,
FindAnyTiledTransposeWithIntermediateBinaryOpTwoTransposes) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[32,48,64]{2,1,0} parameter(0)
p2 = f32[48,32,64]{2,1,0} parameter(1)
t = f32[64,48,32]{2,1,0} transpose(p), dimensions={2,1,0}
t2 = f32[64,48,32]{2,1,0} transpose(p2), dimensions={2,0,1}
ROOT add = f32[64,48,32]{2,1,0} add(t, t2)
}
ENTRY main {
param0 = f32[32,48,64]{2,1,0} parameter(0)
param1 = f32[48,32,64]{2,1,0} parameter(1)
ROOT fusion = f32[64,48,32]{2,1,0} fusion(param0, param1), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r))
.has_value());
EXPECT_EQ(&FindNonTrivialHero(*r), r);
}
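// The producer/consumer fusion adaptor lets hero search cross instruction
// boundaries: a transpose (or layout-changing copy) is found as the hero
// whether it lives in the producer feeding the fusion or inside the consumed
// fusion itself.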
TEST_F(IrEmissionUtilsTest, FindNonTrivialHeroOutsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[100,200,300]{2,1,0} parameter(0)
ROOT add = f32[100,200,300]{2,1,0} add(p0, p0)
}
ENTRY entry {
p0 = f32[300,200,100]{2,1,0} parameter(0)
t = f32[100,200,300]{2,1,0} transpose(p0), dimensions={2,1,0}
fusion = f32[100,200,300]{2,1,0} fusion(t), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{2,1,0} add(t, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* transpose =
module->entry_computation()->GetInstructionWithName("t");
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(transpose, fusion);
HloInstructionAdaptor r(
*module->GetComputationWithName("f")->root_instruction(),
fusion_adaptor.get());
EXPECT_EQ(&FindNonTrivialHero(r).instruction(), transpose);
}
TEST_F(IrEmissionUtilsTest, FindNonTrivialTransposeHeroInsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[300,200,100]{2,1,0} parameter(0)
t = f32[100,200,300]{2,1,0} transpose(p0), dimensions={2,1,0}
ROOT add = f32[100,200,300]{2,1,0} add(t, t)
}
ENTRY entry {
p0 = f32[300,200,100]{2,1,0} parameter(0)
p1 = f32[100,200,300]{2,1,0} parameter(1)
fusion = f32[100,200,300]{2,1,0} fusion(p0), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{2,1,0} add(p1, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
HloInstruction* transpose = module->GetComputationWithName("f")
->parameter_instruction(0)
->users()
.front();
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(fusion, r);
EXPECT_EQ(&FindNonTrivialHero(HloInstructionAdaptor(*r, fusion_adaptor.get()))
.instruction(),
transpose);
}
TEST_F(IrEmissionUtilsTest, FindNonTrivialCopyHeroInsideFusion) {
const char* hlo = R"(
HloModule module
f {
p0 = f32[100,200,300]{2,1,0} parameter(0)
t = f32[100,200,300]{0,1,2} copy(p0)
ROOT add = f32[100,200,300]{0,1,2} add(t, t)
}
ENTRY entry {
p0 = f32[100,200,300]{2,1,0} parameter(0)
p1 = f32[100,200,300]{0,1,2} parameter(1)
fusion = f32[100,200,300]{0,1,2} fusion(p0), kind=kLoop, calls=f
ROOT add = f32[100,200,300]{0,1,2} add(p1, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r = module->entry_computation()->root_instruction();
HloInstruction* copy = module->GetComputationWithName("f")
->parameter_instruction(0)
->users()
.front();
HloInstruction* fusion =
module->entry_computation()->GetInstructionWithName("fusion");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(fusion, r);
EXPECT_EQ(&FindNonTrivialHero(HloInstructionAdaptor(*r, fusion_adaptor.get()))
.instruction(),
copy);
}
TEST_F(IrEmissionUtilsTest, TransposeReachableViaTrivialAndNontrivialOps) {
const char* hlo = R"(
HloModule module
fusion {
p = f64[16,16]{1,0} parameter(0)
trans = f64[16,16]{1,0} transpose(p), dimensions={1,0}
rev = f64[16,16]{1,0} reverse(trans), dimensions={0,1}
sub = f64[16,16]{1,0} subtract(trans, trans)
ROOT add = f64[16,16]{1,0} add(rev, sub)
}
ENTRY main {
param = f64[16,16]{1,0} parameter(0)
ROOT fusion = f64[16,16]{1,0} fusion(param), kind=kLoop, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* r =
module->entry_computation()->root_instruction()->fused_expression_root();
EXPECT_FALSE(
GetDescriptionForTiledTransposeEmitter(*r, FindNonTrivialHero(*r))
.has_value());
EXPECT_EQ(&FindNonTrivialHero(*r), r);
}
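// Adjacent dimensions that are permuted together are collapsed into one
// before tiling: in the tests below the 100 and 11 dimensions merge into a
// single dimension of size 1100, leaving a normalized {2,1,0} transpose.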
TEST_F(IrEmissionUtilsTest, FindTiledTransposeOneSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT c = f32[100,11,12,8]{1,0,2,3} copy(p)
}
ENTRY main {
param = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT fusion = f32[100,11,12,8]{1,0,2,3} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* copy =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*copy, FindNonTrivialHero(*copy));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, copy);
EXPECT_EQ(result->dimensions, Vector3({8, 12, 1100}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTransposeOneSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT t = f32[8,12,100,11]{3,2,1,0} transpose(p), dimensions={3,2,0,1}
}
ENTRY main {
param = f32[100,11,12,8]{3,2,1,0} parameter(0)
ROOT fusion = f32[8,12,100,11]{3,2,1,0} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*tr, FindNonTrivialHero(*tr));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({8, 12, 1100}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledTransposeOtherSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT c = f32[8,12,100,11]{0,1,3,2} copy(p)
}
ENTRY main {
param = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT fusion = f32[8,12,100,11]{0,1,3,2} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* copy =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*copy, FindNonTrivialHero(*copy));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, copy);
EXPECT_EQ(result->dimensions, Vector3({1100, 12, 8}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
TEST_F(IrEmissionUtilsTest, FindTiledLogicalTransposeOtherSwapDimIsSmall) {
const char* hlo = R"(
HloModule module
fusion {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT t = f32[100,11,12,8]{3,2,1,0} transpose(p), dimensions={2,3,1,0}
}
ENTRY main {
param = f32[8,12,100,11]{3,2,1,0} parameter(0)
ROOT fusion = f32[100,11,12,8]{3,2,1,0} fusion(param), kind=kInput, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* tr =
module->entry_computation()->root_instruction()->fused_expression_root();
auto result =
GetDescriptionForTiledTransposeEmitter(*tr, FindNonTrivialHero(*tr));
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result->instr, tr);
EXPECT_EQ(result->dimensions, Vector3({1100, 12, 8}));
EXPECT_EQ(result->permutation, Vector3({2, 1, 0}));
}
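// A slice is contiguous when the selected elements form a single contiguous
// run in the operand's physical layout: all strides must be 1, every
// dimension physically more minor than the innermost partially-sliced
// dimension must be taken in full, and every physically more major dimension
// must be sliced down to size 1.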
TEST_F(IrEmissionUtilsTest, IsContiguousSlice) {
const char* hlo = R"(
HloModule module
ENTRY entry {
p = f32[8,12,100,11]{3,2,1,0} parameter(0)
slice.1 = f32[2,12,100,11]{3,2,1,0} slice(p), slice={[1:3], [0:12], [0:100], [0:11]}
slice.2 = f32[1,1,1,11]{3,2,1,0} slice(p), slice={[1:2], [0:1], [0:1], [0:11]}
slice.3 = f32[1,1,10,11]{3,2,1,0} slice(p), slice={[1:2], [0:1], [0:10], [0:11]}
slice.4 = f32[1,2,10,11]{3,2,1,0} slice(p), slice={[1:2], [0:2], [0:10], [0:11]}
slice.5 = f32[8,2,100,11]{3,2,1,0} slice(p), slice={[0:8], [10:12], [0:100], [0:11]}
c = f32[8,12,100,11]{0,1,3,2} copy(p)
slice.6 = f32[8,12,40,11]{0,1,3,2} slice(c), slice={[0:8], [0:12], [10:50], [0:11]}
slice.7 = f32[8,12,1,2]{0,1,3,2} slice(c), slice={[0:8], [0:12], [0:1], [0:2]}
slice.8 = f32[8,2,100,11]{0,1,3,2} slice(c), slice={[0:8], [0:2], [0:100], [0:11]}
slice.9 = f32[8,2,40,11]{0,1,3,2} slice(c), slice={[0:8], [10:12], [10:50], [0:11]}
slice.10 = f32[8,2,50,11]{3,2,1,0} slice(p), slice={[0:8:1], [10:12:1], [0:100:2], [0:11:1]}
ROOT t = (f32[2,12,100,11]{3,2,1,0},
f32[1,1,1,11]{3,2,1,0},
f32[1,1,10,11]{3,2,1,0},
f32[1,2,10,11]{3,2,1,0},
f32[8,2,100,11]{3,2,1,0},
f32[8,12,40,11]{0,1,3,2},
f32[8,12,1,2]{0,1,3,2},
f32[8,2,100,11]{0,1,3,2},
f32[8,2,40,11]{0,1,3,2},
f32[8,2,50,11]{3,2,1,0}) tuple(slice.1, slice.2, slice.3, slice.4, slice.5, slice.6, slice.7, slice.8, slice.9, slice.10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstruction* slice1 =
module->entry_computation()->GetInstructionWithName("slice.1");
HloInstruction* slice2 =
module->entry_computation()->GetInstructionWithName("slice.2");
HloInstruction* slice3 =
module->entry_computation()->GetInstructionWithName("slice.3");
HloInstruction* slice4 =
module->entry_computation()->GetInstructionWithName("slice.4");
HloInstruction* slice5 =
module->entry_computation()->GetInstructionWithName("slice.5");
HloInstruction* slice6 =
module->entry_computation()->GetInstructionWithName("slice.6");
HloInstruction* slice7 =
module->entry_computation()->GetInstructionWithName("slice.7");
HloInstruction* slice8 =
module->entry_computation()->GetInstructionWithName("slice.8");
HloInstruction* slice9 =
module->entry_computation()->GetInstructionWithName("slice.9");
HloInstruction* slice10 =
module->entry_computation()->GetInstructionWithName("slice.10");
EXPECT_TRUE(IsContiguousSlice(*slice1));
EXPECT_TRUE(IsContiguousSlice(*slice2));
EXPECT_TRUE(IsContiguousSlice(*slice3));
  EXPECT_FALSE(IsContiguousSlice(*slice4));
  EXPECT_FALSE(IsContiguousSlice(*slice5));
EXPECT_TRUE(IsContiguousSlice(*slice6));
EXPECT_TRUE(IsContiguousSlice(*slice7));
  EXPECT_FALSE(IsContiguousSlice(*slice8));
  EXPECT_FALSE(IsContiguousSlice(*slice9));
  EXPECT_FALSE(IsContiguousSlice(*slice10));
}
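// LiteralToXlaFormat packs sub-byte element types: for (u)s4 literals two
// 4-bit values share a byte with the earlier element in the high nibble, so
// {0,1,2,3,4,5} becomes {0x01, 0x23, 0x45}, and an odd element count is
// padded with a zero nibble. Byte-aligned types are passed through without
// copying, which is why the int16 case aliases the literal's own storage.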
TEST_F(IrEmissionUtilsTest, LiteralToAttrToXlaFormat) {
{
Literal literal = LiteralUtil::CreateR2<int16_t>({{0, 1, 2}, {3, 4, 5}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span().size(), literal.size_bytes());
EXPECT_EQ(reinterpret_cast<const char*>(data.span().data()),
literal.untyped_data());
}
{
Literal literal = LiteralUtil::CreateR2<s4>(
{{s4(0), s4(1), s4(2)}, {s4(3), s4(4), s4(5)}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span(), std::vector<uint8_t>({0x01, 0x23, 0x45}));
EXPECT_NE(reinterpret_cast<const void*>(data.span().data()),
literal.untyped_data());
}
{
Literal literal = LiteralUtil::CreateR2<u4>(
{{u4(0), u4(1), u4(2)}, {u4(3), u4(4), u4(5)}, {u4(6), u4(7), u4(8)}});
TF_ASSERT_OK_AND_ASSIGN(DenseDataIntermediate data,
LiteralToXlaFormat(literal));
EXPECT_EQ(data.span(),
std::vector<uint8_t>({0x01, 0x23, 0x45, 0x67, 0x80}));
EXPECT_NE(reinterpret_cast<const void*>(data.span().data()),
literal.untyped_data());
}
}
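// CanEmitFusedDynamicUpdateSliceInPlaceForGpu accepts a fusion only if each
// dynamic-update-slice root updates the same buffer slice that backs the
// corresponding fusion operand, the operand is reachable through bitcasts
// only (no elementwise ops on the path), and any dynamic-slice of that
// operand inside the fusion reads exactly the region being overwritten.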
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesBitcasts) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(true));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_ElementwiseOnPathToParameter) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[2,3]{1,0} parameter(0)
bitcast = s32[2,3]{1,0} negate(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[2,3]{1,0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_SlicesDifferent) {
const char* hlo = R"(
HloModule fusion, is_scheduled=true
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[1,1]{1,0} dynamic-slice(bitcast, param_1.1, zero), dynamic_slice_sizes={1,1}
one = s32[] constant(1)
bitcasted_one = s32[1,1]{1,0} bitcast(one)
add = s32[1,1] add(dynamic-slice, bitcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferAllocation::Slice slice1(&alloc, 10, 20);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[fusion, &slice0, &slice1](const HloInstruction* instr,
const ShapeIndex&) {
if (instr == fusion) {
return slice0;
}
return slice1;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_DynamicUpdateSliceWithDifferentDynamicSliceAccess) {
const char* hlo = R"(
HloModule fusion, input_output_alias={ {}: (0, {}) }
fused_computation {
param_0.1 = s32[6]{0} parameter(0)
bitcast = s32[2,3]{1,0} bitcast(param_0.1)
zero = s32[] constant(0)
one = s32[] constant(1)
param_1.1 = s32[] parameter(1)
dynamic-slice = s32[2,2]{1,0} dynamic-slice(bitcast, param_1.1, one), dynamic_slice_sizes={2,2}
broadcasted_one = s32[2,2]{1,0} broadcast(one), dimensions={}
add = s32[2,2] add(dynamic-slice, broadcasted_one)
dynamic-update-slice = s32[2,3]{1,0} dynamic-update-slice(bitcast, add, param_1.1, zero)
ROOT bitcast.1 = s32[6]{0} bitcast(dynamic-update-slice)
}
ENTRY main {
param_0 = s32[6]{0} parameter(0)
param_1 = s32[] parameter(1)
ROOT fusion = s32[6]{0} fusion(param_0, param_1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(false));
}
TEST_F(IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesMultiOutputFusion) {
const char* hlo = R"(
HloModule MultipleInplaceDus, is_scheduled=true, input_output_alias={ {0}: (0, {}), {1}: (2, {}) }
fused_computation {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
c0 = s32[] constant(0)
cmp = pred[] compare(p4, c0), direction=EQ
broadcast = pred[1,11,12] broadcast(cmp), dimensions={}
select = bf16[1,11,12] select(broadcast, p1, p3)
dus0 = bf16[10,11,12] dynamic-update-slice(p0, select, c0, c0, c0)
dus1 = bf16[8,11,12] dynamic-update-slice(p2, select, c0, c0, c0)
ROOT tuple = (bf16[10,11,12], bf16[8,11,12]) tuple(dus0, dus1)
}
ENTRY main {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
ROOT fusion_root_multiple = (bf16[10,11,12], bf16[8,11,12]) fusion(p0, p1, p2, p3, p4), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
auto fusion = module->entry_computation()->root_instruction();
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
EXPECT_THAT(CanEmitFusedDynamicUpdateSliceInPlaceForGpu(
Cast<HloFusionInstruction>(fusion),
[&slice0](const HloInstruction*, const ShapeIndex&) {
return slice0;
},
HloFusionAdaptor::ForInstruction(fusion)->GetRoots()),
IsOkAndHolds(true));
}
TEST_F(
IrEmissionUtilsTest,
CanEmitFusedDynamicUpdateSliceInPlaceForGpu_HandlesMultiOutputFusionWithTransposeBitcasts) {
const char* hlo = R"(
HloModule MultipleInplaceDusWithTransposeBitcastToTheRoot, is_scheduled=true, input_output_alias={ {0}: (0, {}), {1}: (2, {}) }
fused_computation {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
c0 = s32[] constant(0)
cmp = pred[] compare(p4, c0), direction=EQ
broadcast = pred[1,11,12] broadcast(cmp), dimensions={}
select = bf16[1,11,12] select(broadcast, p1, p3)
dus0 = bf16[10,11,12] dynamic-update-slice(p0, select, c0, c0, c0)
bitcasted_dus0 = bf16[11,10,12] bitcast(dus0)
dus1 = bf16[8,11,12] dynamic-update-slice(p2, select, c0, c0, c0)
ROOT tuple = (bf16[11,10,12], bf16[8,11,12]) tuple(bitcasted_dus0, dus1)
}
ENTRY main {
p0 = bf16[10,11,12] parameter(0)
p1 = bf16[1,11,12] parameter(1)
p2 = bf16[8,11,12] parameter(2)
p3 = bf16[1,11,12] parameter(3)
p4 = s32[] parameter(4)
ROOT fusi | 2,016 |
#ifndef XLA_SERVICE_CPU_ONEDNN_SOFTMAX_H_
#define XLA_SERVICE_CPU_ONEDNN_SOFTMAX_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnSoftmax(const void* run_options_ptr,
void* input, void* result,
void* softmax_config_ptr);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_softmax.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <vector>
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
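// Runtime entry point for the oneDNN softmax custom call. `input` and
// `result` are MemrefInfo-encoded buffers, and `softmax_config_ptr` points to
// a serialized OneDnnSoftmaxConfig carrying the softmax axis.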
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnSoftmax(
const void* run_options_ptr, void* input, void* result,
void* softmax_config_ptr) {
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream = dnnl::stream(
dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = dnnl::stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(softmax_config_ptr));
OneDnnSoftmaxConfig softmax_config;
softmax_config.ParseFromString(config_str);
MemrefInfo input_minfo(input);
MemrefInfo result_minfo(result);
auto src_md = input_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto src_mem = dnnl::memory(src_md, cpu_engine, input_minfo.Data());
auto dst_mem = dnnl::memory(dst_md, cpu_engine, result_minfo.Data());
int axis = softmax_config.softmax_axis();
auto softmax_pd = dnnl::softmax_forward::primitive_desc(
cpu_engine, dnnl::prop_kind::forward_inference,
dnnl::algorithm::softmax_accurate, src_md, dst_md, axis);
auto softmax_prim = dnnl::softmax_forward(softmax_pd);
std::unordered_map<int, dnnl::memory> softmax_args;
softmax_args.insert({DNNL_ARG_SRC, src_mem});
softmax_args.insert({DNNL_ARG_DST, dst_mem});
softmax_prim.execute(onednn_stream, softmax_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "xla/literal.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_ops_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace cpu {
std::string TestParamsToString(
const ::testing::TestParamInfo<std::tuple<PrimitiveType, int>>& data) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = data.param;
return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type),
"_BatchSize", std::to_string(batch_size));
}
class OneDnnSoftmaxTest
: public HloTestBase,
public ::testing::WithParamInterface<std::tuple<PrimitiveType, int>> {
protected:
const char* onednn_softmax_ =
R"(
; CHECK: custom_call_target="__onednn$softmax"
)";
void TestSoftmax(std::string input_hlo_string, int expected_softmax_axis) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(input_hlo_string));
OneDnnOpsRewriter softmax_rewrite_pass;
HloInstruction* onednn_softmax;
OneDnnSoftmaxConfig softmax_config;
TF_ASSERT_OK_AND_ASSIGN(
bool changed, this->RunHloPass(&softmax_rewrite_pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(::xla::match::CustomCall(&onednn_softmax,
{"__onednn$softmax"})));
auto backend_config = onednn_softmax->backend_config<BackendConfig>();
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
int axis_after_rewrite = softmax_config.softmax_axis();
EXPECT_EQ(expected_softmax_axis, axis_after_rewrite);
}
};
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string softmax_hlo_template_string = R"(
HloModule softmax_module
region_max {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
neg_inf = $0[] constant(-inf)
reduce_max = $0[$1,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = $0[$1,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = $0[$1,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = $0[$1,128]{1,0} reshape(broadcast.0)
broadcast.1 = $0[$1,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract.0 = $0[$1,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = $0[$1,128,30522]{2,1,0} exponential(subtract.0)
const_zero = $0[] constant(0)
reduce_add = $0[$1,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = $0[$1,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = $0[$1,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = $0[$1,128]{1,0} reshape(broadcast.2)
broadcast.3 = $0[$1,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
ROOT divide = $0[$1,128,30522]{2,1,0} divide(exponential, broadcast.3)
}
)";
const std::string softmax_hlo_string = absl::Substitute(
softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type), batch_size);
TestSoftmax(softmax_hlo_string, 2);
}
INSTANTIATE_TEST_SUITE_P(OneDnnSoftmaxTestSuite, OneDnnSoftmaxTest,
::testing::Combine(::testing::ValuesIn({F32, BF16,
F16}),
::testing::Values(1, 16)),
TestParamsToString);
TEST_F(OneDnnSoftmaxTest, SoftmaxFP32OnAxisZero) {
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[3,1,1]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[1,1]{1,0} reduce(Arg_0, neg_inf), dimensions={0}, to_apply=region_max
neg_inf.1 = f32[1,1]{1,0} constant({ {-inf} })
maximum = f32[1,1]{1,0} maximum(reduce_max, neg_inf.1)
reshape.0 = f32[1,1,1]{2,1,0} reshape(maximum)
broadcast.0 = f32[1,1,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[1,1]{1,0} reshape(broadcast.0)
broadcast.1 = f32[3,1,1]{2,1,0} broadcast(reshape.1), dimensions={1,2}
subtract = f32[3,1,1]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[3,1,1]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[1,1]{1,0} reduce(exponential, const_zero), dimensions={0}, to_apply=region_add
reshape.2 = f32[1,1,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[1,1,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[1,1]{1,0} reshape(broadcast.2)
broadcast.3 = f32[3,1,1]{2,1,0} broadcast(reshape.3), dimensions={1,2}
ROOT divide = f32[3,1,1]{2,1,0} divide(exponential, broadcast.3)
}
)";
TestSoftmax(softmax_hlo_string, 0);
}
TEST_F(OneDnnSoftmaxTest, SoftmaxWithBF16ConvertOutputFP32Pattern) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[16,128,30522]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[16,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = f32[16,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = f32[16,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[16,128]{1,0} reshape(broadcast.0)
broadcast.1 = f32[16,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract = f32[16,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[16,128,30522]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[16,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = f32[16,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[16,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[16,128]{1,0} reshape(broadcast.2)
broadcast.3 = f32[16,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
divide = f32[16,128,30522]{2,1,0} divide(exponential, broadcast.3)
ROOT convert = bf16[16,128,30522]{2,1,0} convert(divide)
}
)";
TestSoftmax(softmax_hlo_string, 2);
}
}
}
#endif | 2,017 |
#ifndef XLA_SERVICE_CPU_PARALLEL_TASK_ASSIGNMENT_H_
#define XLA_SERVICE_CPU_PARALLEL_TASK_ASSIGNMENT_H_
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
class ParallelCostModel {
public:
virtual ~ParallelCostModel() = default;
virtual int64_t GetParallelTaskCount(HloInstruction* instruction) = 0;
};
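// Computes a target parallel task count per instruction, delegating the
// actual estimate to a cost model chosen at construction time.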
class ParallelTaskAssignment {
public:
ParallelTaskAssignment(int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
HloModule* module,
const TargetMachineFeatures* target_machine_features);
~ParallelTaskAssignment() {}
int64_t GetTargetParallelTaskCount(HloInstruction* instruction);
private:
std::unique_ptr<ParallelCostModel> cost_model_;
const TargetMachineFeatures& target_machine_features_;
};
class ParallelTaskAssigner : public HloModulePass {
public:
ParallelTaskAssigner(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
const TargetMachineFeatures* target_machine_features)
: max_parallelism_(max_parallelism),
shape_size_function_(shape_size),
target_machine_features_(*target_machine_features) {}
~ParallelTaskAssigner() override {}
absl::string_view name() const override {
return "cpu-parallel-task-assigner";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
using HloToParallelTasks =
absl::flat_hash_map<const HloInstruction*, int64_t>;
bool AssignParallelTasks(HloModule* module,
const HloToParallelTasks& hlo_to_parallel_tasks);
bool AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks);
void ComputeTargetParallelTasks(HloModule* module,
HloToParallelTasks* hlo_to_parallel_tasks);
int64_t max_parallelism_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
#include "xla/service/cpu/parallel_task_assignment.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
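// Fallback cost model used when HloCostAnalysis fails: parallelism scales
// with output size at one task per 256 KiB, so e.g. a 1 MiB output yields
// min(max_parallelism_, 4) tasks.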
class SimpleCostModel : public ParallelCostModel {
public:
SimpleCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size)
: max_parallelism_(max_parallelism), shape_size_(shape_size) {}
~SimpleCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
const int64_t instruction_cost = shape_size_(instruction->shape());
const int64_t min_cost_per_thread = 256LL << 10;
return std::min(
max_parallelism_,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
};
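// Cost model backed by HloCostAnalysis. Memory-bound instructions (flops to
// bytes ratio <= 1) get a reduced cap of ceil(sqrt(MaxParallelism())) and are
// costed by output size at 256 KiB per task; compute-bound instructions keep
// the full cap and are costed by a weighted sum of flops, transcendentals,
// and bytes accessed at 100000 cost units per task.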
class DefaultCostModel : public ParallelCostModel {
public:
DefaultCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
std::unique_ptr<HloCostAnalysis> cost_analysis)
: max_parallelism_(max_parallelism),
shape_size_(shape_size),
cost_analysis_(std::move(cost_analysis)) {}
~DefaultCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
int64_t instruction_cost;
int64_t min_cost_per_thread;
int64_t max_parallelism;
const int64_t bytes_accessed =
std::max(int64_t{1}, cost_analysis_->bytes_accessed(*instruction));
const float flops_to_bytes_ratio =
cost_analysis_->flop_count(*instruction) /
static_cast<float>(bytes_accessed);
if (flops_to_bytes_ratio <= 1.0) {
max_parallelism = std::min<int64_t>(
max_parallelism_, std::ceil(std::sqrt(tsl::port::MaxParallelism())));
instruction_cost = shape_size_(instruction->shape());
min_cost_per_thread = 256LL << 10;
} else {
max_parallelism = max_parallelism_;
instruction_cost =
1 * cost_analysis_->flop_count(*instruction) +
2 * cost_analysis_->transcendental_count(*instruction) +
10 * cost_analysis_->bytes_accessed(*instruction);
min_cost_per_thread = 100000;
}
return std::min(
max_parallelism,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
const std::unique_ptr<HloCostAnalysis> cost_analysis_;
};
ParallelTaskAssignment::ParallelTaskAssignment(
const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size, HloModule* module,
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {
VLOG(1) << "ParallelTaskAssignment max_parallelism: " << max_parallelism;
auto cost_analysis = std::make_unique<HloCostAnalysis>(shape_size);
HloComputation* computation = module->entry_computation();
absl::Status status =
computation->root_instruction()->Accept(cost_analysis.get());
if (status.ok()) {
cost_model_ = std::make_unique<DefaultCostModel>(
max_parallelism, shape_size, std::move(cost_analysis));
} else {
cost_model_ =
std::make_unique<SimpleCostModel>(max_parallelism, shape_size);
}
}
int64_t ParallelTaskAssignment::GetTargetParallelTaskCount(
HloInstruction* instruction) {
auto opcode = instruction->opcode();
if (llvm_ir::MayBeImplementedAsInPlaceDynamicUpdateSlice(instruction) ||
instruction->shape().IsTuple() || opcode == HloOpcode::kRng ||
opcode == HloOpcode::kConstant) {
return 1;
}
if (instruction->IsElementwise() || instruction->IsLoopFusion() ||
opcode == HloOpcode::kBroadcast || opcode == HloOpcode::kConcatenate ||
opcode == HloOpcode::kDynamicSlice ||
opcode == HloOpcode::kDynamicUpdateSlice ||
opcode == HloOpcode::kGather || opcode == HloOpcode::kIota ||
opcode == HloOpcode::kPad || opcode == HloOpcode::kReduce ||
opcode == HloOpcode::kReduceWindow || opcode == HloOpcode::kReshape ||
opcode == HloOpcode::kReverse || opcode == HloOpcode::kSlice ||
opcode == HloOpcode::kTranspose ||
(opcode == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*instruction,
target_machine_features_))) {
return cost_model_->GetParallelTaskCount(instruction);
}
return 1;
}
absl::StatusOr<bool> ParallelTaskAssigner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "ParallelTaskAssigner ENTRY");
XLA_VLOG_LINES(3, module->ToString());
HloToParallelTasks hlo_to_parallel_tasks;
ComputeTargetParallelTasks(module, &hlo_to_parallel_tasks);
bool changed = AssignParallelTasks(module, hlo_to_parallel_tasks);
XLA_VLOG_LINES(2, "ParallelTaskAssigner EXIT");
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
bool ParallelTaskAssigner::AssignParallelTasks(
HloModule* module, const HloToParallelTasks& hlo_to_parallel_tasks) {
return AssignParallelTasksHelper(module, module->entry_computation(),
hlo_to_parallel_tasks);
}
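// Walks the computation (recursing into while bodies and called
// computations), outlines each instruction assigned more than one partition
// into its own "parallel_" call, and records the per-dimension partition
// counts in the outlined root's backend config.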
bool ParallelTaskAssigner::AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks) {
bool changed = false;
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
for (auto* instruction : instructions) {
if (instruction->opcode() == HloOpcode::kWhile) {
changed |= AssignParallelTasksHelper(module, instruction->while_body(),
hlo_to_parallel_tasks);
continue;
} else if (instruction->opcode() == HloOpcode::kCall) {
changed |= AssignParallelTasksHelper(module, instruction->to_apply(),
hlo_to_parallel_tasks);
continue;
}
auto it = hlo_to_parallel_tasks.find(instruction);
if (it == hlo_to_parallel_tasks.end()) {
continue;
}
    const int64_t target_parallel_task_count = it->second;
auto dim_partition_counts = ShapePartitionAssigner(instruction->shape())
.Run(target_parallel_task_count);
const int64_t total_partition_count =
ShapePartitionAssigner::GetTotalPartitionCount(dim_partition_counts);
if (total_partition_count <= 1) {
continue;
}
auto* call = module->OutlineExpressionFromComputation(
{instruction}, absl::StrCat("parallel_", instruction->name()),
computation);
auto* new_root = call->to_apply()->root_instruction();
BackendConfig backend_config;
absl::c_copy(dim_partition_counts,
tsl::protobuf::RepeatedFieldBackInserter(
backend_config.mutable_outer_dimension_partitions()));
TF_CHECK_OK(new_root->set_backend_config(backend_config));
VLOG(2) << "Assigned parallel task count: " << total_partition_count
<< " to instruction: " << new_root->name()
<< " parent: " << new_root->parent()->name();
changed = true;
}
return changed;
}
void ParallelTaskAssigner::ComputeTargetParallelTasks(
HloModule* module, HloToParallelTasks* hlo_to_parallel_tasks) {
ParallelTaskAssignment parallel_task_assignment(max_parallelism_,
shape_size_function_, module,
&target_machine_features_);
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instruction : computation->instructions()) {
const int64_t target_parallel_task_count =
parallel_task_assignment.GetTargetParallelTaskCount(instruction);
if (target_parallel_task_count > 1) {
hlo_to_parallel_tasks->insert(
{instruction, target_parallel_task_count});
}
}
}
}
}
} | #include "xla/service/cpu/parallel_task_assignment.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class ParallelTaskAssignmentTest : public HloTestBase {
protected:
const HloCostAnalysis::ShapeSizeFunction shape_size_func_ =
cpu::CpuExecutable::ShapeSizeBytes;
const int max_parallelism_ = 10;
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features_;
ParallelTaskAssignmentTest()
: HloTestBase(), target_machine_features_([](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
}) {}
absl::StatusOr<bool> RunParallelTaskAssigner(HloModule* module) {
return cpu::ParallelTaskAssigner(max_parallelism_, shape_size_func_,
&target_machine_features_)
.Run(module);
}
};
TEST_F(ParallelTaskAssignmentTest, DotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_Dot
ENTRY Dot {
dot_lhs = f32[196614,2]{1,0} parameter(0)
dot_rhs = f32[2,1]{1,0} parameter(1)
ROOT dot = f32[196614,1]{1,0} dot(dot_lhs, dot_rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest,
FusedComputationWithDotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_DotNestedInFusedComp
fused_computation.0 {
parameter.0 = f32[196614,2]{1,0} parameter(0)
parameter.0.1 = f32[2,1]{1,0} parameter(1)
parameter.0.2 = f32[196614,1]{1,0} parameter(2)
dot.0 = f32[196614,1]{1,0} dot(parameter.0, parameter.0.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT add.0 = f32[196614,1]{1,0} add(dot.0, parameter.0.2)
}
ENTRY DotNestedInFusedComp {
parameter = f32[196614,2]{1,0} parameter(0)
parameter.1 = f32[2,1]{1,0} parameter(1)
parameter.2 = f32[196614,1]{1,0} parameter(2)
ROOT fusion = f32[196614,1]{1,0} fusion(parameter, parameter.1,
parameter.2), kind=kOutput, calls=fused_computation.0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, RngOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_rng
ENTRY Rng {
src0 = f32[] parameter(0)
src1 = f32[] parameter(1)
ROOT rng0 = f32[1234567,2]{1,0} rng(f32[] src0, f32[] src1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InfeedOutfeedOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_infeed_outfeed
ENTRY InfeedOutfeed {
token0 = token[] after-all()
infeed0 = (u32[12345678,2]{1,0}, token[]) infeed(token0)
infeed0.data = u32[12345678,2]{1,0} get-tuple-element((u32[12345678,2]{1,0}, token[]) infeed0), index=0
ROOT outfeed0 = token[] outfeed(infeed0.data, token0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InPlaceDynamicUpdateSliceNotParallelized) {
const std::string hlo_string = R"(
HloModule test
body {
zero = s32[] constant(0)
one = s32[] constant(1)
ten = s32[] constant(10)
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
i = s32[] get-tuple-element(loop_carry), index=0
i_plus_ten = s32[] add(i, ten)
update = u32[1,100] get-tuple-element(loop_carry), index=1
data = u32[10000,100] get-tuple-element(loop_carry), index=2
new_data = u32[10000,100] dynamic-update-slice(data, update, i_plus_ten, zero)
new_i = s32[] add(i, one)
ROOT tuple = (s32[], u32[1,100], u32[10000,100]) tuple(new_i, update, new_data)
}
cond {
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
two = s32[] constant(2)
i = s32[] get-tuple-element(loop_carry), index=0
ROOT less-than = pred[] compare(i, two), direction=LT
}
ENTRY test {
zero = s32[] constant(0)
initial_i = s32[] parameter(0)
update = u32[1,100] parameter(1)
data = u32[10000,100] parameter(2)
tuple = (s32[], u32[1,100], u32[10000,100]) tuple(initial_i, update, data)
ROOT while = (s32[], u32[1,100], u32[10000,100]) while(tuple), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, AllReduceNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_allreduce
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[1234567] parameter(0)
ROOT crs = f32[1234567] all-reduce(input), replica_groups={}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, ConstantNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_constant
ENTRY const {
ROOT constant = f32[1234567] constant({...})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
}
} | 2,018 |
#ifndef XLA_SERVICE_CPU_XFEED_MANAGER_H_
#define XLA_SERVICE_CPU_XFEED_MANAGER_H_
#include <deque>
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace cpu {
namespace runtime {
class XfeedBuffer {
public:
virtual ~XfeedBuffer() = default;
virtual int32_t length() = 0;
virtual void* data() = 0;
virtual void Done(absl::StatusOr<Shape> shape) = 0;
};
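// Manages a single infeed or outfeed queue. Producers enqueue buffers; the
// consumer dequeues one buffer at a time, blocking while the queue is empty,
// and must release the current buffer before dequeueing the next.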
class XfeedQueueManager {
public:
  explicit XfeedQueueManager(std::string queue_name)
      : queue_name_(std::move(queue_name)) {}
void Reset();
void EnqueueBuffersAtomically(absl::Span<XfeedBuffer* const> buffers);
XfeedBuffer* BlockingDequeueBuffer();
void ReleaseCurrentBuffer(int32_t length, void* data,
absl::StatusOr<Shape> shape);
private:
const std::string queue_name_;
absl::Mutex mu_;
absl::CondVar cv_;
std::deque<XfeedBuffer*> enqueued_buffers_;
XfeedBuffer* current_buffer_ = nullptr;
};
class XfeedManager {
public:
XfeedManager() = default;
void Reset();
XfeedQueueManager* infeed() { return &infeed_; }
XfeedQueueManager* outfeed() { return &outfeed_; }
private:
XfeedQueueManager infeed_ = {"infeed"};
XfeedQueueManager outfeed_ = {"outfeed"};
};
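// Returns the transfer size for `shape`: its plain byte size for tuple and
// static shapes; dynamic shapes additionally carry one int32 of bound
// metadata per dimension, so e.g. a dynamic f32[<=10] vector needs
// 10 * 4 + 1 * sizeof(int32_t) = 44 bytes.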
int64_t GetByteSizeRequirement(const Shape& shape, int64_t pointer_size);
}
}
}
#endif
#include "xla/service/cpu/xfeed_manager.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace runtime {
void XfeedManager::Reset() {
infeed()->Reset();
outfeed()->Reset();
}
void XfeedQueueManager::Reset() {
absl::MutexLock l(&mu_);
CHECK(current_buffer_ == nullptr);
for (auto buffer : enqueued_buffers_) {
buffer->Done(ShapeUtil::MakeNil());
}
enqueued_buffers_.clear();
}
void XfeedQueueManager::EnqueueBuffersAtomically(
absl::Span<XfeedBuffer* const> buffers) {
absl::MutexLock l(&mu_);
bool was_empty = enqueued_buffers_.empty();
for (XfeedBuffer* b : buffers) {
VLOG(3) << "Enqueueing " << queue_name_ << " buffer (of " << buffers.size()
<< " buffers) with length: " << b->length();
enqueued_buffers_.push_back(b);
}
if (was_empty && !buffers.empty()) {
cv_.Signal();
}
}
XfeedBuffer* XfeedQueueManager::BlockingDequeueBuffer() {
absl::MutexLock l(&mu_);
VLOG(3) << "Waiting for an available buffer.";
while (enqueued_buffers_.empty()) {
cv_.Wait(&mu_);
}
VLOG(3) << "A buffer is available!";
CHECK(current_buffer_ == nullptr);
current_buffer_ = enqueued_buffers_.front();
enqueued_buffers_.pop_front();
return current_buffer_;
}
void XfeedQueueManager::ReleaseCurrentBuffer(int32_t length, void* data,
absl::StatusOr<Shape> shape) {
VLOG(3) << "Releasing buffer with shape: "
<< (shape.ok() ? ShapeUtil::HumanString(shape.value())
: "<error status>");
absl::MutexLock l(&mu_);
CHECK(current_buffer_ != nullptr);
CHECK_EQ(length, current_buffer_->length());
CHECK_EQ(data, current_buffer_->data());
current_buffer_->Done(std::move(shape));
current_buffer_ = nullptr;
}
int64_t GetByteSizeRequirement(const Shape& shape, int64_t pointer_size) {
if (shape.IsTuple() || shape.is_static()) {
return ShapeUtil::ByteSizeOf(shape, pointer_size);
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return ShapeUtil::ByteSizeOf(shape, pointer_size) + metadata_size;
}
}
}
} | #include "xla/service/cpu/xfeed_manager.h"
#include <memory>
#include <string>
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/shape_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
class InfeedManagerTest : public ::testing::Test {};
class TestInfeedBuffer : public cpu::runtime::XfeedBuffer {
public:
explicit TestInfeedBuffer(int32_t length, bool expect_shape_match = true)
: shape_(ShapeUtil::MakeShape(U8, {length})),
done_called_(false),
length_(length),
expect_shape_match_(expect_shape_match) {}
~TestInfeedBuffer() override { EXPECT_TRUE(done_called_); }
int32_t length() override { return length_; }
void* data() override { return nullptr; }
void Done(absl::StatusOr<Shape> shape) override {
CHECK(!done_called_);
done_called_ = true;
TF_ASSERT_OK(shape.status());
EXPECT_EQ(expect_shape_match_, ShapeUtil::Equal(shape_, shape.value()))
<< "want " << ShapeUtil::HumanString(shape_) << " "
<< (expect_shape_match_ ? "==" : "!=") << " "
<< ShapeUtil::HumanString(shape.value());
delete this;
}
const Shape& shape() const { return shape_; }
private:
Shape shape_;
bool done_called_;
int32_t length_;
bool expect_shape_match_;
};
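// Drives one acquire/release round trip through the CPU runtime's infeed
// entry points, exactly as generated code would do at execution time.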
void ProcessNextBuffer(int32_t length) {
auto shape = ShapeUtil::MakeShape(U8, {length});
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireInfeedBufferForDequeue(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
nullptr, length, buffer, bytes.data(), bytes.size());
}
void ProcessNextOutfeedBuffer(int32_t length, const Shape& shape) {
std::string bytes = shape.SerializeAsString();
void* buffer = __xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
nullptr, length, bytes.data(), bytes.size());
__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
nullptr, length, buffer, bytes.data(), bytes.size());
}
TEST_F(InfeedManagerTest, SingleThreadedSequential) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(a->length());
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, SingleThreadedInterleaved) {
TestInfeedBuffer* a = new TestInfeedBuffer(64);
TestInfeedBuffer* b = new TestInfeedBuffer(32);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->infeed()->EnqueueBuffersAtomically({a});
ProcessNextBuffer(a->length());
xfeed->infeed()->EnqueueBuffersAtomically({b});
ProcessNextBuffer(b->length());
}
TEST_F(InfeedManagerTest, MultiThreaded) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "test", 2);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
const int32_t length = 64;
pool.Schedule([length, &xfeed]() {
int64_t start_micros = tsl::Env::Default()->NowMicros();
while (true) {
int64_t end_micros = tsl::Env::Default()->NowMicros();
if ((end_micros - start_micros) >= 100000) {
break;
}
}
TestInfeedBuffer* a = new TestInfeedBuffer(length);
xfeed->infeed()->EnqueueBuffersAtomically({a});
});
ProcessNextBuffer(length);
}
TEST_F(InfeedManagerTest, OutfeedBasic) {
TestInfeedBuffer* b = new TestInfeedBuffer(32, true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {32}));
}
TEST_F(InfeedManagerTest, OutfeedEmpty) {
TestInfeedBuffer* b = new TestInfeedBuffer(0, true);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(0, ShapeUtil::MakeShape(U8, {0}));
}
TEST_F(InfeedManagerTest, OutfeedWrongShape) {
TestInfeedBuffer* b = new TestInfeedBuffer(32, false);
cpu::runtime::XfeedManager* xfeed = cpu::runtime::GetXfeedManager(0);
xfeed->outfeed()->EnqueueBuffersAtomically({b});
ProcessNextOutfeedBuffer(32, ShapeUtil::MakeShape(U8, {33}));
}
}
} | 2,019 |
#ifndef XLA_SERVICE_CPU_ONEDNN_CONVOLUTION_H_
#define XLA_SERVICE_CPU_ONEDNN_CONVOLUTION_H_
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
namespace xla {
namespace cpu {
extern "C" {
extern void __xla_cpu_runtime_OneDnnConvolution(void* result, void** args);
}
}
}
#endif
#endif
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_convolution.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>
#define EIGEN_USE_THREADS
#include "dnnl.hpp"
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace cpu {
namespace {
using dnnl::algorithm;
using dnnl::convolution_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
}
dnnl::memory ReorderMemory(const dnnl::engine& engine,
const dnnl::memory::desc& dest_md,
dnnl::memory& src_mem,
const dnnl::stream& onednn_stream) {
auto dest_mem = memory(dest_md, engine);
dnnl::reorder(src_mem, dest_mem).execute(onednn_stream, src_mem, dest_mem);
return dest_mem;
}
dnnl::memory::format_tag GetFormatTag(const int dims) {
return (dims == 3) ? dnnl::memory::format_tag::nwc
: (dims == 4) ? dnnl::memory::format_tag::nhwc
: (dims == 5) ? dnnl::memory::format_tag::ndhwc
: dnnl::memory::format_tag::any;
}
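// Runtime entry point for the oneDNN convolution custom call. `args` packs,
// in order: the argument count, the ExecutableRunOptions, a serialized
// OneDnnConvolutionConfig, and the input and kernel MemrefInfo buffers;
// `result` is the output buffer.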
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnConvolution(
void* result, void** args) {
int arg_indx = 0;
const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream =
stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(args[arg_indx++]));
OneDnnConvolutionConfig conv_config;
conv_config.ParseFromString(config_str);
std::vector<int64_t> inp_perm_axes(conv_config.dims());
std::vector<int64_t> ker_perm_axes(conv_config.dims());
std::vector<int64_t> out_perm_axes(conv_config.dims());
int index_i = 0;
int index_o = 0;
int index_k = 0;
inp_perm_axes[conv_config.input().data().batch_dim()] = index_i++;
out_perm_axes[conv_config.output().data().batch_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().output_feature_dim()] = index_k++;
inp_perm_axes[conv_config.input().data().feature_dim()] = index_i++;
out_perm_axes[conv_config.output().data().feature_dim()] = index_o++;
ker_perm_axes[conv_config.kernel().filter().input_feature_dim()] = index_k++;
std::vector<int64_t> inp_dim_axes(
conv_config.input().data().spatial_dims().begin(),
conv_config.input().data().spatial_dims().end());
std::vector<int64_t> ker_dim_axes(
conv_config.kernel().filter().spatial_dims().begin(),
conv_config.kernel().filter().spatial_dims().end());
std::vector<int64_t> out_dim_axes(
conv_config.output().data().spatial_dims().begin(),
conv_config.output().data().spatial_dims().end());
std::for_each(inp_dim_axes.begin(), inp_dim_axes.end(),
[&inp_perm_axes, &index_i](int64_t& n) {
n -= 1;
inp_perm_axes[n] = index_i++;
});
std::for_each(ker_dim_axes.begin(), ker_dim_axes.end(),
[&ker_perm_axes, &index_k](int64_t& n) {
n -= 1;
ker_perm_axes[n] = index_k++;
});
std::for_each(out_dim_axes.begin(), out_dim_axes.end(),
[&out_perm_axes, &index_o](int64_t& n) {
n -= 1;
out_perm_axes[n] = index_o++;
});
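  // The config serializes window parameters offset by one (so that zero can
  // stand for "unset"); undo that offset here. Dilations get an extra
  // decrement because oneDNN counts dilation from zero, where 0 means a
  // dense (undilated) window.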
memory::dims strides(conv_config.window().strides().begin(),
conv_config.window().strides().end());
memory::dims pad_left(conv_config.window().pad_left().begin(),
conv_config.window().pad_left().end());
memory::dims pad_right(conv_config.window().pad_right().begin(),
conv_config.window().pad_right().end());
memory::dims rhs_dilations(conv_config.window().window_dilations().begin(),
conv_config.window().window_dilations().end());
std::for_each(strides.begin(), strides.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_left.begin(), pad_left.end(), [](int64_t& n) { n -= 1; });
std::for_each(pad_right.begin(), pad_right.end(), [](int64_t& n) { n -= 1; });
std::for_each(rhs_dilations.begin(), rhs_dilations.end(),
[](int64_t& n) { n -= 2; });
auto groups = conv_config.feature_groups();
MemrefInfo inp_minfo(args[arg_indx++]);
MemrefInfo ker_minfo(args[arg_indx++]);
MemrefInfo res_minfo(result);
auto inp_md = inp_minfo.GetOneDnnMemDesc();
auto ker_md = ker_minfo.GetOneDnnMemDesc();
auto res_md = res_minfo.GetOneDnnMemDesc();
std::vector<int> inp_axes(inp_perm_axes.begin(), inp_perm_axes.end());
std::vector<int> ker_axes(ker_perm_axes.begin(), ker_perm_axes.end());
std::vector<int> out_axes(out_perm_axes.begin(), out_perm_axes.end());
auto new_inp_md = inp_md.permute_axes(inp_axes);
auto new_ker_md = ker_md.permute_axes(ker_axes);
auto new_res_md = res_md.permute_axes(out_axes);
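  // Grouped convolution: oneDNN expects weights with an explicit leading
  // groups dimension, (groups, out_channels / groups, in_channels_per_group,
  // spatial...), so reshape the permuted kernel accordingly.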
if (groups > 1) {
auto corr_dims = new_ker_md.get_dims();
corr_dims.insert(corr_dims.begin(), 1, groups);
corr_dims[1] = corr_dims[1] / groups;
new_ker_md = new_ker_md.reshape(corr_dims);
}
auto any_ker_md =
memory::desc(new_ker_md.get_dims(), new_ker_md.get_data_type(),
dnnl::memory::format_tag::any);
auto any_inp_md =
memory::desc(new_inp_md.get_dims(), new_inp_md.get_data_type(),
GetFormatTag(new_inp_md.get_ndims()));
auto any_res_md =
memory::desc(new_res_md.get_dims(), new_res_md.get_data_type(),
GetFormatTag(new_res_md.get_ndims()));
XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
dnnl::primitive_attr attrs;
auto inp_mem = memory(new_inp_md, cpu_engine, inp_minfo.Data());
auto ker_mem = memory(new_ker_md, cpu_engine, ker_minfo.Data());
auto res_mem = memory(new_res_md, cpu_engine, res_minfo.Data());
auto conv_pd = convolution_forward::primitive_desc(
cpu_engine, prop_kind::forward_inference, algorithm::convolution_direct,
any_inp_md, any_ker_md, any_res_md, strides, rhs_dilations, pad_left,
pad_right, attrs);
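  // The primitive may prefer blocked layouts that differ from our buffers;
  // reorder each operand only when the descriptors actually differ, and
  // stage the result in a scratch memory object if the destination layout
  // differs as well.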
auto new_inp_mem = (conv_pd.src_desc() == inp_mem.get_desc())
? inp_mem
: ReorderMemory(cpu_engine, conv_pd.src_desc(),
inp_mem, onednn_stream);
auto new_ker_mem = (conv_pd.weights_desc() == ker_mem.get_desc())
? ker_mem
: ReorderMemory(cpu_engine, conv_pd.weights_desc(),
ker_mem, onednn_stream);
auto new_res_mem = (conv_pd.dst_desc() == res_mem.get_desc())
? res_mem
: memory(conv_pd.dst_desc(), cpu_engine);
auto conv_prim = convolution_forward(conv_pd);
std::unordered_map<int, memory> conv_args{{DNNL_ARG_SRC, new_inp_mem},
{DNNL_ARG_WEIGHTS, new_ker_mem},
{DNNL_ARG_DST, new_res_mem}};
conv_prim.execute(onednn_stream, conv_args);
if (conv_pd.dst_desc() == res_mem.get_desc()) {
res_mem = new_res_mem;
} else {
dnnl::reorder(new_res_mem, res_mem)
.execute(onednn_stream, new_res_mem, res_mem);
}
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_matmul_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace xla {
namespace cpu {
class ConvolutionTest : public HloTestBase {
protected:
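  // FileCheck pattern asserting that the optimized HLO contains a
  // __onednn$convolution custom call carrying an onednn_conv_config backend
  // config.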
const char* conv_rewrite_str_ = R"(
; CHECK: custom_call_target="__onednn$convolution",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_conv_config":{
; CHECK-DAG: }
; CHECK: }
)";
};
TEST_F(ConvolutionTest, Simple2DTestF32) {
const char* convolution_module_str = R"(
HloModule convolution.test.f32
ENTRY convolution.test.f32 {
arg.0 = f32[1,22,22,1] parameter(0), parameter_replication={false}
reshape.0 = f32[1,22,22,1] reshape(arg.0)
arg.1 = f32[8,8,1,1] parameter(1), parameter_replication={false}
reshape.1 = f32[8,8,1,1] reshape(arg.1)
convolution.0 = f32[1,11,11,1] convolution(reshape.0, reshape.1), window={size=8x8 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
reshape.2 = f32[1,11,11,1] reshape(convolution.0)
tuple.0 = (f32[1,11,11,1]) tuple(reshape.2)
ROOT get-tuple-element.0 = f32[1,11,11,1] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
TEST_F(ConvolutionTest, Simple3DTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* convolution_module_str = R"(
HloModule convolution.test.bf16
ENTRY convolution.test.bf16 {
p0 = bf16[8,4,5,5,1] parameter(0)
p1 = bf16[3,3,3,1,32] parameter(1)
ROOT conv = bf16[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
})";
EXPECT_TRUE(RunAndCompare(convolution_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(convolution_module_str, conv_rewrite_str_);
}
}
}
#endif | 2,020 |
#ifndef XLA_SERVICE_CPU_CPU_LAYOUT_ASSIGNMENT_H_
#define XLA_SERVICE_CPU_CPU_LAYOUT_ASSIGNMENT_H_
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/layout_assignment.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
class CpuLayoutAssignment : public LayoutAssignment {
public:
explicit CpuLayoutAssignment(
ComputationLayout* entry_computation_layout,
const TargetMachineFeatures* target_machine_features,
ChannelLayoutConstraints* channel_constraints = nullptr)
: LayoutAssignment(entry_computation_layout, channel_constraints),
target_machine_features_(*target_machine_features) {}
~CpuLayoutAssignment() override {}
protected:
absl::Status AddBackendConstraints(LayoutConstraints* constraints) override;
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
#include "xla/service/cpu/cpu_layout_assignment.h"
#include <cstdint>
#include <numeric>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
namespace {
using std::nullopt;
using std::optional;
using ShouldMakeOperandColMajorCache =
absl::flat_hash_map<const HloInstruction*, bool>;
}
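// Returns true only if every user of `instruction` is a dot that profits
// from a column-major layout for this operand and consumes `instruction`
// exactly once.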
static bool ShouldMakeAllUsersColMajor(const HloInstruction* instruction) {
for (auto* user : instruction->users()) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(*user);
if (!operand_idx || user->operand(*operand_idx) != instruction ||
absl::c_count(user->operands(), instruction) != 1) {
return false;
}
}
return true;
}
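// If `instruction` is a dot whose constant operand would profit from a
// column-major layout, returns that operand's index. The per-constant
// verdict is memoized in `cache`, so a constant shared by several dots is
// only flipped when all of its users agree.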
static optional<int64_t> ShouldMakeOperandColumnMajor(
ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(instruction);
if (!operand_idx) {
return nullopt;
}
const HloInstruction* operand = instruction.operand(*operand_idx);
if (operand->opcode() != HloOpcode::kConstant) {
return nullopt;
}
auto it = cache->find(operand);
if (it == cache->end()) {
auto insert_result =
cache->insert({operand, ShouldMakeAllUsersColMajor(operand)});
CHECK(insert_result.second);
it = insert_result.first;
}
return it->second ? operand_idx : nullopt;
}
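// Assigns every array subshape a descending minor-to-major (row-major)
// layout; ColMajorShape below is the ascending counterpart for a single
// array shape.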
static Shape RowMajorShape(Shape shape) {
ShapeUtil::ForEachMutableSubshape(
&shape, [](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
std::vector<int64_t> dimension_order(subshape->dimensions_size());
std::iota(dimension_order.rbegin(), dimension_order.rend(), 0);
*subshape->mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
});
return shape;
}
static Shape ColMajorShape(const Shape& old_shape) {
Shape new_shape(old_shape);
std::vector<int64_t> dimension_order(new_shape.dimensions_size());
std::iota(dimension_order.begin(), dimension_order.end(), 0);
*new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
return new_shape;
}
static bool OperandsAndResultMustHaveRowMajorLayout(
const HloInstruction& instr,
const TargetMachineFeatures& target_machine_features) {
if (instr.opcode() == HloOpcode::kConvolution) {
return PotentiallyImplementedAsEigenConvolution(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kDot) {
return DotOperandsAndResultMustHaveRowMajorLayout(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kCustomCall) {
return instr.custom_call_target() == "TopK";
}
return false;
}
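// CPU-specific constraints: Eigen-emitted convolutions and dots and the TopK
// custom call require row-major operands and results; profitable constant
// dot operands are pinned column-major; reduce-scatter and all-gather keep
// their scatter/gather dimension major; remaining array operands default to
// row-major.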
absl::Status CpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
ShouldMakeOperandColMajorCache cache;
const HloComputation* computation = constraints->computation();
for (auto* instruction : computation->instructions()) {
if (OperandsAndResultMustHaveRowMajorLayout(*instruction,
target_machine_features_)) {
TF_RETURN_IF_ERROR(SetInstructionLayout(
RowMajorShape(instruction->shape()), instruction));
for (int i = 0; i < instruction->operand_count(); i++) {
TF_RETURN_IF_ERROR(SetOperandLayout(
RowMajorShape(instruction->operand(i)->shape()), instruction, i));
}
} else if (optional<int64_t> op_idx =
ShouldMakeOperandColumnMajor(&cache, *instruction)) {
const HloInstruction* op = instruction->operand(*op_idx);
TF_RETURN_IF_ERROR(
SetOperandLayout(ColMajorShape(op->shape()), instruction, *op_idx));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else {
for (int64_t operand_no = 0; operand_no < instruction->operand_count();
++operand_no) {
if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
continue;
}
if (AnyOperandBufferForwarded(instruction, operand_no)) {
continue;
}
if (!instruction->operand(operand_no)->shape().IsArray()) {
continue;
}
Shape operand_shape(
RowMajorShape(instruction->operand(operand_no)->shape()));
TF_RETURN_IF_ERROR(
SetOperandLayout(operand_shape, instruction, operand_no));
}
if (computation->parent()->entry_computation() == computation &&
computation->root_instruction() == instruction) {
continue;
}
if (!instruction->shape().IsArray()) {
continue;
}
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class CpuLayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* module,
ComputationLayout* entry_computation_layout) {
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(entry_computation_layout,
&target_machine_features);
EXPECT_IS_OK(layout_assignment.Run(module).status());
}
};
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
dot_lhs->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
builder.AddInstruction(HloInstruction::CreateBinary(
result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
for (HloInstruction* instruction :
{dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_a_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
Shape result_b_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
auto tuple_result = builder.AddInstruction(
HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction :
{dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "param0"));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape other_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
struct DotOutputFusionLayoutAssignmentResult {
bool layout_assignment_changed_something;
const HloInstruction* dot_lhs_fusion_param;
const HloInstruction* dot_rhs_fusion_param;
const HloInstruction* addend_fusion_param;
};
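// Builds dot(param, constant) + addend, output-fuses the add into the dot,
// runs CPU layout assignment, and returns the fusion parameters feeding the
// dot and the addend so the tests can inspect their assigned layouts.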
static absl::StatusOr<DotOutputFusionLayoutAssignmentResult> RunDotOutputFusion(
HloModule* module, const std::string& test_name, int m, int k, int n,
const int64_t dot_operand_idx_in_add) {
DotOutputFusionLayoutAssignmentResult result;
CHECK(dot_operand_idx_in_add == 0 || dot_operand_idx_in_add == 1);
auto builder = HloComputation::Builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
HloInstruction* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
HloInstruction* addend = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_shape, "param1"));
HloInstruction* dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(dot_rhs_shape)));
HloInstruction* dot_result =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
HloInstruction* add_result;
if (dot_operand_idx_in_add == 0) {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, dot_result, addend));
} else {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, addend, dot_result));
}
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion_instruction =
module->entry_computation()->AddInstruction(HloInstruction::CreateFusion(
dot_shape, HloInstruction::FusionKind::kOutput, add_result));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(add_result, fusion_instruction));
HloInstruction* fused_add =
fusion_instruction->fused_instructions_computation()->root_instruction();
HloInstruction* fused_dot = fusion_instruction->FuseInstruction(dot_result);
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(dot_result));
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_lhs_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
result.dot_lhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(0)->parameter_number());
result.dot_rhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(1)->parameter_number());
result.addend_fusion_param = fusion_instruction->operand(
fused_add->operand(1 - dot_operand_idx_in_add)->parameter_number());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(&computation_layout,
&target_machine_features);
TF_ASSIGN_OR_RETURN(result.layout_assignment_changed_something,
layout_assignment.Run(module));
return result;
}
static void AssertCorrectLayoutForDotOutputFusion(
const HloComputation* computation,
const DotOutputFusionLayoutAssignmentResult& layout_assignment_result,
bool expect_col_major_dot_rhs) {
Layout expected_dot_rhs_layout = expect_col_major_dot_rhs
? LayoutUtil::MakeLayout({0, 1})
: LayoutUtil::MakeLayout({1, 0});
if (layout_assignment_result.dot_rhs_fusion_param->shape().rank() == 1) {
expected_dot_rhs_layout = LayoutUtil::MakeLayout({0});
}
EXPECT_TRUE(LayoutUtil::Equal(
expected_dot_rhs_layout,
layout_assignment_result.dot_rhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.dot_lhs_fusion_param->shape().rank()),
layout_assignment_result.dot_lhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.addend_fusion_param->shape().rank()),
layout_assignment_result.addend_fusion_param->shape().layout()));
EXPECT_THAT(computation->instructions(), Each(Not(op::Copy())));
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/1, /*k=*/50, /*n=*/19,
                         /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50, /*n=*/1,
                         /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), /*m=*/19, /*k=*/50,
                         /*n=*/19, /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, BatchDotLayoutMustBeRowMajor) {
const char* hlo_string = R"(
HloModule BatchDotLayoutMustBeRowMajor
ENTRY BatchDotLayoutMustBeRowMajor {
p0 = f32[10,1,10] parameter(0)
p1 = f32[10,10,1] parameter(1)
ROOT dot = f32[10,1,1] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 10}, {2, 1, 0}));
*computation_layout.mutable_parameter_layout(1) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 1}, {2, 1, 0}));
*computation_layout.mutable_result_layout() = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {1, 2, 0}));
AssignLayouts(module.get(), &computation_layout);
Shape expected_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {2, 1, 0});
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::ShapeWithLayout(expected_shape)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Dot(
op::ShapeWithLayout(computation_layout.parameter_layout(0).shape()),
op::ShapeWithLayout(
computation_layout.parameter_layout(1).shape()))));
}
}
} | 2,021 |
#ifndef XLA_SERVICE_CPU_CONV_CANONICALIZATION_H_
#define XLA_SERVICE_CPU_CONV_CANONICALIZATION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace cpu {
class ConvCanonicalization : public HloModulePass {
public:
explicit ConvCanonicalization(
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {}
~ConvCanonicalization() override {}
absl::string_view name() const override {
return "convolution-canonicalization";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const TargetMachineFeatures& target_machine_features_;
};
}
}
#endif
#include "xla/service/cpu/conv_canonicalization.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
absl::StatusOr<bool> ConvCanonicalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloInstruction* hlo :
module->entry_computation()->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*hlo,
target_machine_features_)) {
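      // This convolution is not yet in the form the Eigen kernels expect, so
      // rewrite it to the canonical order -- (batch, spatial..., feature)
      // for the input and output and (spatial..., input_feature,
      // output_feature) for the kernel -- by inserting transposes around a
      // new convolution.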
const ConvolutionDimensionNumbers& dnums =
hlo->convolution_dimension_numbers();
auto input_batch_dim = dnums.input_batch_dimension();
auto input_feature_dim = dnums.input_feature_dimension();
auto kernel_input_feature_dim = dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = dnums.kernel_output_feature_dimension();
const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();
const int64_t num_dims = num_spatial_dims + 2;
HloInstruction* input = hlo->mutable_operand(0);
std::vector<int64_t> new_input_dim_order(num_dims);
std::vector<int64_t> new_input_dims(num_dims);
new_input_dim_order[0] = input_batch_dim;
new_input_dims[0] = input->shape().dimensions(input_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_input_dim_order[i + 1] = dnums.input_spatial_dimensions(i);
new_input_dims[i + 1] =
input->shape().dimensions(dnums.input_spatial_dimensions(i));
}
new_input_dim_order[num_dims - 1] = input_feature_dim;
new_input_dims[num_dims - 1] =
input->shape().dimensions(input_feature_dim);
Shape new_input_shape =
ShapeUtil::MakeShape(input->shape().element_type(), new_input_dims);
HloInstruction* new_input = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_input_shape, input,
new_input_dim_order));
HloInstruction* kernel = hlo->mutable_operand(1);
std::vector<int64_t> new_kernel_dim_order(num_dims);
std::vector<int64_t> new_kernel_dims(num_dims);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_kernel_dim_order[i] = dnums.kernel_spatial_dimensions(i);
new_kernel_dims[i] =
kernel->shape().dimensions(dnums.kernel_spatial_dimensions(i));
}
new_kernel_dim_order[num_dims - 2] = kernel_input_feature_dim;
new_kernel_dims[num_dims - 2] =
kernel->shape().dimensions(kernel_input_feature_dim);
new_kernel_dim_order[num_dims - 1] = kernel_output_feature_dim;
new_kernel_dims[num_dims - 1] =
kernel->shape().dimensions(kernel_output_feature_dim);
Shape new_kernel_shape =
ShapeUtil::MakeShape(kernel->shape().element_type(), new_kernel_dims);
HloInstruction* new_kernel = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_kernel_shape, kernel,
new_kernel_dim_order));
std::vector<int64_t> new_output_dim_order(num_dims);
std::vector<int64_t> new_conv_dims(num_dims);
auto output_batch_dim = dnums.output_batch_dimension();
auto output_feature_dim = dnums.output_feature_dimension();
new_output_dim_order[0] = output_batch_dim;
new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_output_dim_order[i + 1] = dnums.output_spatial_dimensions(i);
new_conv_dims[i + 1] =
hlo->shape().dimensions(dnums.output_spatial_dimensions(i));
}
new_output_dim_order[num_dims - 1] = output_feature_dim;
new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);
Shape new_conv_shape =
ShapeUtil::MakeShape(hlo->shape().element_type(), new_conv_dims);
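      // Dimension numbers of the canonicalized convolution: batch first,
      // feature last, spatial dimensions in between (kernel: spatial first,
      // then input and output feature).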
ConvolutionDimensionNumbers new_dnums;
new_dnums.set_input_batch_dimension(0);
new_dnums.set_output_batch_dimension(0);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_dnums.add_input_spatial_dimensions(i + 1);
new_dnums.add_kernel_spatial_dimensions(i);
new_dnums.add_output_spatial_dimensions(i + 1);
}
new_dnums.set_input_feature_dimension(num_dims - 1);
new_dnums.set_output_feature_dimension(num_dims - 1);
new_dnums.set_kernel_input_feature_dimension(num_dims - 2);
new_dnums.set_kernel_output_feature_dimension(num_dims - 1);
HloInstruction* new_conv = module->entry_computation()->AddInstruction(
HloInstruction::CreateConvolve(
new_conv_shape, new_input, new_kernel, hlo->feature_group_count(),
hlo->batch_group_count(), hlo->window(), new_dnums,
hlo->precision_config()));
TF_RETURN_IF_ERROR(module->entry_computation()->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(
hlo->shape(), new_conv,
InversePermutation(new_output_dim_order))));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/cpu/conv_canonicalization.h"
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
using ::testing::ElementsAre;
class ConvCanonicalizationTest : public HloTestBase {
public:
ConvCanonicalizationTest() {
for (int i = 0; i < 2; ++i) {
auto dim = conv_window_.add_dimensions();
dim->set_size(kWindowSize);
dim->set_stride(1);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
}
protected:
Window conv_window_;
static constexpr int kBatchSize = 50;
static constexpr int kInputSize = 28;
static constexpr int kWindowSize = 5;
static constexpr int kInputFeatureCount = 32;
static constexpr int kOutputFeatureCount = 64;
};
TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(1);
dnums.set_output_batch_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_kernel_spatial_dimensions(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_output_feature_dimension(0);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kOutputFeatureCount, kBatchSize, output_size, output_size}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_TRUE(conv_canonicalization.Run(module.get()).value());
const HloInstruction* output_reshape = entry_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTranspose, output_reshape->opcode());
const HloInstruction* canonical_conv = output_reshape->operand(0);
EXPECT_EQ(HloOpcode::kConvolution, canonical_conv->opcode());
const HloInstruction* input_reshape = canonical_conv->operand(0);
EXPECT_EQ(HloOpcode::kTranspose, input_reshape->opcode());
const HloInstruction* kernel_reshape = canonical_conv->operand(1);
EXPECT_EQ(HloOpcode::kTranspose, kernel_reshape->opcode());
EXPECT_THAT(input_reshape->dimensions(), ElementsAre(1, 2, 3, 0));
EXPECT_THAT(kernel_reshape->dimensions(), ElementsAre(2, 3, 1, 0));
EXPECT_THAT(output_reshape->dimensions(), ElementsAre(3, 0, 1, 2));
}
TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kBatchSize, kInputSize, kInputSize, kInputFeatureCount))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kBatchSize, output_size, output_size, kOutputFeatureCount}),
      input, kernel, /*feature_group_count=*/1, /*batch_group_count=*/1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_FALSE(conv_canonicalization.Run(module.get()).value());
}
}
} | 2,022 |
#ifndef XLA_SERVICE_GPU_RUNTIME_INFEED_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_INFEED_THUNK_H_
#include <vector>
#include "absl/status/status.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla {
namespace gpu {
class InfeedThunk : public Thunk {
public:
InfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> dest_slices);
InfeedThunk(const InfeedThunk&) = delete;
InfeedThunk& operator=(const InfeedThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
const std::vector<ShapedSlice> dest_slices_;
};
}
}
#endif
#include "xla/service/gpu/runtime/infeed_thunk.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/infeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
InfeedThunk::InfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> dest_slices)
: Thunk(Kind::kInfeed, thunk_info), dest_slices_(std::move(dest_slices)) {}
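// Blocks until the host-side infeed manager hands over the next buffer tree,
// copies each leaf buffer into its destination slice on the stream, and
// waits for the transfers to complete before returning.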
absl::Status InfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Infeeding to GPU";
ShapeTree<se::DeviceMemoryHandle> source_buffers =
GetOrCreateInfeedManager(stream.parent())->BlockingGetNextDestination();
size_t index = 0;
for (auto& source : source_buffers.leaves()) {
const ShapeIndex& shape_index = source.first;
se::DeviceMemoryHandle& buffer = source.second;
const Shape& source_shape =
ShapeUtil::GetSubshape(source_buffers.shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(dest_slices_[index].shape, source_shape))
<< "Mismatch between infeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_shape)
<< " and infeed dest buffer shape "
<< ShapeUtil::HumanStringWithLayout(dest_slices_[index].shape);
se::DeviceMemoryBase dest_address =
buffer_allocations.GetDeviceAddress(dest_slices_[index++].slice);
TF_RETURN_IF_ERROR(
stream.Memcpy(&dest_address, buffer.memory(), buffer.memory().size()));
}
CHECK_EQ(index, dest_slices_.size())
<< "Infeed did not populate all destination buffers";
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Infeeding to GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/infeed_thunk.h"
#include <memory>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(InfeedThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice infeed_slice(&alloc, 10, 40);
InfeedThunk::InfeedBuffer infeed_buffer = {
infeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
InfeedThunk::Create({"infeed"}, {infeed_buffer}));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(infeed_slice));
BufferAllocation::Slice side_effect_slice(&alloc, 0, 1);
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Write(side_effect_slice));
}
}
} | 2,023 |
#ifndef XLA_SERVICE_GPU_RUNTIME_COPY_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_COPY_THUNK_H_
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
class DeviceToDeviceCopyThunk : public Thunk {
public:
DeviceToDeviceCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size);
DeviceToDeviceCopyThunk(const DeviceToDeviceCopyThunk&) = delete;
DeviceToDeviceCopyThunk& operator=(const DeviceToDeviceCopyThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const BufferAllocation::Slice& source() const { return source_buffer_; }
const BufferAllocation::Slice& destination() const {
return destination_buffer_;
}
uint64_t size_bytes() const { return mem_size_; }
private:
const BufferAllocation::Slice source_buffer_;
const BufferAllocation::Slice destination_buffer_;
const uint64_t mem_size_;
};
class CopyThunk : public Thunk {
public:
class AsyncEvents {
public:
absl::Status Emplace(se::StreamExecutor* executor,
const HloInstruction* instr,
std::unique_ptr<se::Event> event);
absl::StatusOr<std::unique_ptr<se::Event>> Extract(
se::StreamExecutor* executor, const HloInstruction* instr);
private:
using Key = std::pair<se::StreamExecutor*, const HloInstruction*>;
absl::Mutex mutex_;
absl::flat_hash_map<Key, std::unique_ptr<se::Event>> events_
ABSL_GUARDED_BY(mutex_);
};
CopyThunk(ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const BufferAllocation::Slice& source() const { return source_buffer_; }
const BufferAllocation::Slice& destination() const {
return destination_buffer_;
}
uint64_t size_bytes() const { return mem_size_; }
private:
const BufferAllocation::Slice source_buffer_;
const BufferAllocation::Slice destination_buffer_;
const uint64_t mem_size_;
};
class DeviceToHostCopyThunk : public CopyThunk {
public:
DeviceToHostCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* instr_;
};
class HostToDeviceCopyThunk : public CopyThunk {
public:
HostToDeviceCopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* instr_;
};
class CopyDoneThunk : public Thunk {
public:
CopyDoneThunk(Thunk::Kind kind, ThunkInfo thunk_info,
std::shared_ptr<CopyThunk::AsyncEvents> events,
const HloInstruction* copy_start_instr);
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::shared_ptr<CopyThunk::AsyncEvents> async_events_;
const HloInstruction* copy_start_instr_;
};
}
}
#endif
#include "xla/service/gpu/runtime/copy_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
DeviceToDeviceCopyThunk::DeviceToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
absl::Status DeviceToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination_buffer_);
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source_buffer_);
VLOG(3) << "Memcpy D2D of size " << mem_size_ << " from "
<< source_data.opaque() << " to " << destination_data.opaque();
return params.stream->Memcpy(&destination_data, source_data, mem_size_);
}
CopyThunk::CopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
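// The base class's ExecuteOnStream is deliberately a no-op; the
// device-to-host and host-to-device subclasses below implement the actual
// transfer.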
absl::Status CopyThunk::ExecuteOnStream(const ExecuteParams& params) {
return absl::OkStatus();
}
absl::Status CopyThunk::AsyncEvents::Emplace(se::StreamExecutor* executor,
const HloInstruction* instr,
std::unique_ptr<se::Event> event) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
VLOG(3) << "Emplace event " << event.get();
if (auto [it, inserted] = events_.try_emplace(key, std::move(event));
inserted) {
return absl::OkStatus();
}
return absl::InternalError("Async copy event already exists!");
}
absl::StatusOr<std::unique_ptr<se::Event>> CopyThunk::AsyncEvents::Extract(
se::StreamExecutor* executor, const HloInstruction* instr) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
if (auto event = events_.extract(key)) {
VLOG(3) << "Extract event " << event.mapped().get();
return std::move(event.mapped());
}
return absl::InternalError("Async copy event was not found!");
}
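// AsyncEvents implements the handshake between an asynchronous copy-start
// and its matching CopyDoneThunk: when a copy runs on a stream other than
// the main one, the start thunk records an event keyed by
// (executor, instruction), and the done thunk later extracts that event and
// makes its stream wait on it.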
DeviceToHostCopyThunk::DeviceToHostCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status DeviceToHostCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_dst = destination_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(cpu_dst, source_data, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy D2H from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy D2H from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
HostToDeviceCopyThunk::HostToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status HostToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_src = source_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(&destination_data, cpu_src, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy H2D from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy H2D from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
CopyDoneThunk::CopyDoneThunk(
Thunk::Kind kind, ThunkInfo thunk_info,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* copy_start_instr)
: Thunk(kind, std::move(thunk_info)),
async_events_(std::move(async_events)),
copy_start_instr_(copy_start_instr) {}
absl::Status CopyDoneThunk::ExecuteOnStream(const ExecuteParams& params) {
VLOG(3) << "CopyDone thunk between a host and a device for: "
<< copy_start_instr_->ToString();
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Event> event,
async_events_->Extract(executor, copy_start_instr_));
return params.stream->WaitFor(event.get());
}
}
} | #include "xla/service/cpu/runtime/copy_thunk.h"
#include <cstddef>
#include <vector>
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(CopyThunkTest, CopySameShape) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, shape, dst_slice, shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(src, dst);
}
TEST(CopyThunkTest, CopyTransposed) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape src_shape = ShapeUtil::MakeShape(F32, {2, 2});
*src_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape dst_shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, src_shape, dst_slice, dst_shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {1.0, 3.0, 2.0, 4.0};
EXPECT_EQ(expected, dst);
}
}
} | 2,024 |
#ifndef XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
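// ConvolutionThunk runs a GPU convolution described by `GpuConvConfig`. It
// lazily creates one GenericConvRunner per stream, so concurrent executions
// on different streams do not share mutable runner state.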
class ConvolutionThunk : public Thunk {
public:
ConvolutionThunk(ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice);
ConvolutionThunk(const ConvolutionThunk&) = delete;
ConvolutionThunk& operator=(const ConvolutionThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::vector<BufferAllocation::Slice> operand_buffers_;
std::vector<BufferAllocation::Slice> result_buffers_;
BufferAllocation::Slice scratch_buffer_;
GenericConvRunner& GetOrCreateRunner(const stream_executor::Stream* stream,
bool* runner_created);
const GpuConvConfig config_;
absl::Mutex mu_;
absl::flat_hash_map<const stream_executor::Stream*,
std::unique_ptr<GenericConvRunner>>
runner_cache_ ABSL_GUARDED_BY(mu_);
};
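// ConvolutionReorderThunk reorders an int8 filter (and optional bias) into
// the vectorized int8x32 layout expected by cuDNN convolutions.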
class ConvolutionReorderThunk : public Thunk {
public:
ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices);
ConvolutionReorderThunk(const ConvolutionReorderThunk&) = delete;
ConvolutionReorderThunk& operator=(const ConvolutionReorderThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
static se::dnn::FilterDescriptor CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw);
const se::dnn::FilterDescriptor filter_descriptor_;
absl::InlinedVector<BufferAllocation::Slice, 2> operand_buffers_;
absl::InlinedVector<BufferAllocation::Slice, 2> result_buffers_;
};
}
}
#endif
#include "xla/service/gpu/runtime/convolution_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#if TENSORFLOW_USE_ROCM
#include "xla/service/gpu/stream_executor_util.h"
#endif
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
ConvolutionThunk::ConvolutionThunk(
ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice)
: Thunk(Kind::kConvolution, thunk_info),
operand_buffers_(std::move(operand_slices)),
result_buffers_(std::move(result_slices)),
scratch_buffer_(scratch_slice),
config_(std::move(config)) {}
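// Returns the cached runner for `stream`, creating it on first use.
// `runner_created` tells the caller whether this was a cache miss, which the
// ROCm build uses below to run one-time MIOpen algorithm discovery.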
GenericConvRunner& ConvolutionThunk::GetOrCreateRunner(
const stream_executor::Stream* stream, bool* runner_created) {
absl::MutexLock lock(&mu_);
auto it = runner_cache_.find(stream);
*runner_created = (it == runner_cache_.end());
if (*runner_created) {
it = runner_cache_
.insert({stream, std::make_unique<GenericConvRunner>(config_)})
.first;
}
return *it->second;
}
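// Resolves operand/result/scratch device addresses from the buffer
// allocations and dispatches the convolution on the execution stream.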
absl::Status ConvolutionThunk::ExecuteOnStream(const ExecuteParams& params) {
const auto& buffer_allocations = *params.buffer_allocations;
std::vector<se::DeviceMemoryBase> operand_se_buffers, result_se_buffers;
operand_se_buffers.reserve(operand_buffers_.size());
for (BufferAllocation::Slice buffer : operand_buffers_) {
operand_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
result_se_buffers.reserve(result_buffers_.size());
for (BufferAllocation::Slice buffer : result_buffers_) {
result_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
se::DeviceMemoryBase scratch =
buffer_allocations.GetDeviceAddress(scratch_buffer_);
bool runner_created = false;
RunConvOptions opts;
opts.runner_cache = &GetOrCreateRunner(params.stream, &runner_created);
#if TENSORFLOW_USE_ROCM
if (runner_created) {
TF_ASSIGN_OR_RETURN(
GpuConvParams conv_params,
GetGpuConvParams(config_, operand_se_buffers, result_se_buffers));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config_.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config_.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config_.output_type));
TF_ASSIGN_OR_RETURN(auto dnn,
se::dnn::internal::GetDnnFromStream(params.stream));
se::OwningScratchAllocator<> scratch_allocator(
buffer_allocations.device_ordinal(),
buffer_allocations.memory_allocator());
std::vector<se::dnn::ProfileResult> profile_results;
dnn->GetMIOpenConvolveAlgorithms(
kind, input_type, output_type, params.stream, config_.input_descriptor,
conv_params.input_buf, config_.filter_descriptor,
conv_params.filter_buf, config_.output_descriptor,
conv_params.output_buf, config_.conv_desc, &scratch_allocator,
&profile_results);
}
#endif
TF_RETURN_IF_ERROR(RunGpuConv(config_, absl::MakeSpan(operand_se_buffers),
absl::MakeSpan(result_se_buffers), scratch,
params.stream, opts));
if (!params.stream->ok()) {
return Internal("ConvolutionThunk::ExecuteOnStream failed.");
}
return absl::OkStatus();
}
ConvolutionReorderThunk::ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices)
: Thunk(Kind::kConvolutionReorder, thunk_info),
filter_descriptor_(CreateFilterDescriptor(filter_nchw)),
operand_buffers_(operand_slices),
result_buffers_(result_slices) {}
absl::Status ConvolutionReorderThunk::ExecuteOnStream(
const ExecuteParams& params) {
bool has_bias = operand_buffers_.size() > 1;
CHECK_EQ(operand_buffers_.size(), result_buffers_.size());
const auto& buffer_allocations = *params.buffer_allocations;
auto filter_input = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(operand_buffers_[0]));
auto filter_output = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(result_buffers_[0]));
auto bias_input =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(operand_buffers_[1])))
: std::nullopt;
auto bias_output =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(result_buffers_[1])))
: std::nullopt;
auto dnn = params.stream->parent()->AsDnn();
if (dnn == nullptr) {
return absl::InternalError("No DNN for stream.");
}
return dnn->CudnnReorderConvolutionFilterAndBias(
params.stream, filter_descriptor_, filter_input, &filter_output,
std::move(bias_input), std::move(bias_output));
}
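// Builds a 4-D (output, input, height, width) filter descriptor in the
// kOutputInputYX32 layout required by the cuDNN reorder entry point.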
se::dnn::FilterDescriptor ConvolutionReorderThunk::CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw) {
CHECK_EQ(filter_nchw.size(), 4);
se::dnn::FilterDescriptor filter_desc(2);
filter_desc.set_layout(se::dnn::FilterLayout::kOutputInputYX32);
filter_desc.set_output_feature_map_count(filter_nchw[0]);
filter_desc.set_input_feature_map_count(filter_nchw[1]);
filter_desc.set_input_filter_height(filter_nchw[2]);
filter_desc.set_input_filter_width(filter_nchw[3]);
return filter_desc;
}
}
} | #include "xla/service/cpu/runtime/convolution_thunk.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "Eigen/Core"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
struct ConvolutionDimensions {
int batch_size = 1;
int input_size = 3;
int input_channels = 5;
int kernel_size = 3;
int output_channels = 3;
int output_size = input_size - kernel_size + 1;
};
template <typename T>
class ConvolutionThunkTypedTest : public ::testing::Test {};
using CorrectTypes = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(ConvolutionThunkTypedTest, CorrectTypes);
std::vector<int64_t> MakeInputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> input_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
input_dims.push_back(dims.input_size);
}
input_dims.push_back(dims.input_channels);
return input_dims;
}
std::vector<int64_t> MakeKernelDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> kernel_dims = {};
for (int i = 0; i < convolution_rank; ++i) {
kernel_dims.push_back(dims.kernel_size);
}
kernel_dims.push_back(dims.input_channels);
kernel_dims.push_back(dims.output_channels);
return kernel_dims;
}
std::vector<int64_t> MakeOutputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> output_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
output_dims.push_back(dims.output_size);
}
output_dims.push_back(dims.output_channels);
return output_dims;
}
template <typename ElementType>
std::vector<ElementType> MakeDataVector(const std::vector<int64_t>& dims) {
auto size = absl::c_accumulate(dims, 1, std::multiplies<int>());
return std::vector<ElementType>(size, ElementType(0.0));
}
template <typename ElementType>
std::vector<MaybeOwningDeviceMemory> MakeBuffers(
    std::vector<ElementType>& input, std::vector<ElementType>& kernel,
    std::vector<ElementType>& output) {
  // Note: taken by mutable reference because se::DeviceMemoryBase wraps a
  // non-const `void*` and cannot be constructed from a const data() pointer.
std::vector<MaybeOwningDeviceMemory> buffers;
size_t input_size_in_bytes = input.size() * sizeof(ElementType);
buffers.emplace_back(se::DeviceMemoryBase(input.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(kernel.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(output.data(), output_size_in_bytes));
return buffers;
}
ConvolutionThunk::Options MakeConvolutionOptions() {
ConvolutionThunk::Options options;
options.multi_threaded = false;
options.use_acl = false;
return options;
}
ConvolutionDimensionNumbers MakeConvolutionDimensionNumbers(
int convolution_rank) {
ConvolutionDimensionNumbers dnums;
int dim = 0;
dnums.set_input_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_input_spatial_dimensions(dim++);
}
dnums.set_input_feature_dimension(dim++);
dim = 0;
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_kernel_spatial_dimensions(dim++);
}
dnums.set_kernel_input_feature_dimension(dim++);
dnums.set_kernel_output_feature_dimension(dim++);
dim = 0;
dnums.set_output_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_output_spatial_dimensions(dim++);
}
dnums.set_output_feature_dimension(dim++);
return dnums;
}
Window MakeWindow(int convolution_rank) {
Window window;
for (int i = 0; i < convolution_rank; ++i) {
WindowDimension* window_dim = window.add_dimensions();
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
return window;
}
template <typename ElementType>
class ConvolutionThunkBuilder {
public:
auto Build(int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
auto input_dims = MakeInputDims(convolution_rank, dims);
auto kernel_dims = MakeKernelDims(convolution_rank, dims);
auto output_dims = MakeOutputDims(convolution_rank, dims);
input_ = MakeDataVector<ElementType>(input_dims);
kernel_ = MakeDataVector<ElementType>(kernel_dims);
output_ = MakeDataVector<ElementType>(output_dims);
size_t input_size_in_bytes = input_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(input_.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(kernel_.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(output_.data(), output_size_in_bytes));
allocations_ = std::make_unique<BufferAllocations>(buffers_);
input_alloc_ =
std::make_unique<BufferAllocation>(0, input_size_in_bytes, 0);
kernel_alloc_ =
std::make_unique<BufferAllocation>(1, kernel_size_in_bytes, 0);
output_alloc_ =
std::make_unique<BufferAllocation>(2, output_size_in_bytes, 0);
BufferAllocation::Slice input_slice(input_alloc_.get(), 0,
input_size_in_bytes);
BufferAllocation::Slice kernel_slice(kernel_alloc_.get(), 0,
kernel_size_in_bytes);
BufferAllocation::Slice output_slice(output_alloc_.get(), 0,
output_size_in_bytes);
auto primitive_type = primitive_util::NativeToPrimitiveType<ElementType>();
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_dims);
Shape kernel_shape = ShapeUtil::MakeShape(primitive_type, kernel_dims);
Shape output_shape = ShapeUtil::MakeShape(primitive_type, output_dims);
auto options = MakeConvolutionOptions();
auto dnums = MakeConvolutionDimensionNumbers(convolution_rank);
auto window = MakeWindow(convolution_rank);
return ConvolutionThunk::Create(
{"convolution"}, options, std::move(input_slice), input_shape,
std::move(kernel_slice), kernel_shape, std::move(output_slice),
output_shape, dnums, window,
        /*feature_group_count=*/1);  // Parameter name assumed for clarity.
}
auto GetExecutionParams() {
return Thunk::ExecuteParams{nullptr, allocations_.get()};
}
private:
std::vector<ElementType> input_;
std::vector<ElementType> kernel_;
std::vector<ElementType> output_;
std::vector<MaybeOwningDeviceMemory> buffers_;
std::unique_ptr<BufferAllocations> allocations_;
std::unique_ptr<BufferAllocation> input_alloc_;
std::unique_ptr<BufferAllocation> kernel_alloc_;
std::unique_ptr<BufferAllocation> output_alloc_;
};
template <typename ElementType>
void SuccessfulConvolution(int convolution_rank) {
ConvolutionThunkBuilder<ElementType> builder;
  TF_ASSERT_OK_AND_ASSIGN(auto thunk, builder.Build(convolution_rank));
Thunk::ExecuteParams params = builder.GetExecutionParams();
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution1D) {
SuccessfulConvolution<TypeParam>(1);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution2D) {
SuccessfulConvolution<TypeParam>(2);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution3D) {
SuccessfulConvolution<TypeParam>(3);
}
TEST(ConvolutionThunkTest, CreationErrorOnUnsupportedType) {
ConvolutionThunkBuilder<int> builder;
auto status_or_thunk = builder.Build(2);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Unsupported element type (S32)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnIncorrectConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk = builder.Build(4);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (4)"));
}
}
} | 2,025 |
#ifndef XLA_SERVICE_GPU_RUNTIME_OUTFEED_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_OUTFEED_THUNK_H_
#include <vector>
#include "absl/status/status.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla {
namespace gpu {
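// OutfeedThunk copies outfeed operands from device buffers into the host
// destination buffers handed out by the per-executor OutfeedManager.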
class OutfeedThunk : public Thunk {
public:
OutfeedThunk(ThunkInfo thunk_info, std::vector<ShapedSlice> source_slices);
OutfeedThunk(const OutfeedThunk&) = delete;
OutfeedThunk& operator=(const OutfeedThunk&) = delete;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
const std::vector<ShapedSlice> source_slices_;
};
}
}
#endif
#include "xla/service/gpu/runtime/outfeed_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/outfeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
OutfeedThunk::OutfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> source_slices)
: Thunk(Kind::kOutfeed, thunk_info),
source_slices_(std::move(source_slices)) {}
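// Blocks until a consumer has registered a destination shape tree, then
// copies each source slice into the matching leaf buffer and signals
// completion of every leaf.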
absl::Status OutfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Outfeeding from GPU";
OutfeedManager* outfeed_manager = GetOrCreateOutfeedManager(stream.parent());
ShapeTree<std::unique_ptr<OutfeedBuffer>>* output_buffers =
outfeed_manager->BlockingGetNextDestination();
if (source_slices_.empty()) {
return absl::OkStatus();
}
const int64_t leaf_count = output_buffers->leaf_count();
TF_RET_CHECK(source_slices_.size() == leaf_count)
<< "Mismatch between number of outfeed inputs (" << source_slices_.size()
<< ") and outputs (" << leaf_count << ")";
auto output_leaf_it = output_buffers->leaf_begin();
for (int64_t index = 0; index < leaf_count; ++index) {
const ShapeIndex& shape_index = output_leaf_it->first;
std::unique_ptr<OutfeedBuffer>& buffer = output_leaf_it->second;
++output_leaf_it;
const Shape& output_shape =
ShapeUtil::GetSubshape(output_buffers->shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(source_slices_[index].shape, output_shape))
<< "Mismatch between outfeed output buffer shape "
<< ShapeUtil::HumanStringWithLayout(output_shape)
<< " and outfeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_slices_[index].shape);
BufferAllocation::Slice source_slice = source_slices_[index].slice;
if (!source_slice.allocation())
return Internal("outfeed source missing buffer allocation");
se::DeviceMemoryBase data_address =
buffer_allocations.GetDeviceAddress(source_slice);
TF_RETURN_IF_ERROR(stream.Memcpy(buffer->destination()->untyped_data(),
data_address, buffer->length()));
TF_RETURN_IF_ERROR(stream.DoHostCallback([&buffer]() { buffer->Done(); }));
}
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Outfeeding from GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/outfeed_thunk.h"
#include <memory>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(OutfeedThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice outfeed_slice(&alloc, 10, 40);
OutfeedThunk::OutfeedBuffer outfeed_buffer = {
outfeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
OutfeedThunk::Create({"outfeed"}, {outfeed_buffer}));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(outfeed_slice));
BufferAllocation::Slice side_effect_slice(&alloc, 0, 1);
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Write(side_effect_slice));
}
}
} | 2,026 |
#ifndef XLA_SERVICE_GPU_RUNTIME_WHILE_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_WHILE_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
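// WhileThunk executes a while loop: it repeatedly runs the condition thunk
// sequence, copies the bool predicate back to the host, and runs the body
// thunk sequence until the predicate becomes false. If `trip_count` is known
// statically, the condition computation is skipped entirely.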
class WhileThunk : public Thunk {
public:
WhileThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count = std::nullopt);
WhileThunk(const WhileThunk&) = delete;
WhileThunk& operator=(const WhileThunk&) = delete;
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
SequentialThunk* condition_thunk_sequence() const {
return condition_thunk_sequence_.get();
}
SequentialThunk* body_thunk_sequence() const {
return body_thunk_sequence_.get();
}
const BufferAllocation::Slice& condition_result_buffer() const {
return condition_result_buffer_index_;
}
static absl::StatusOr<int64_t> CurrentLoopIteration(int64_t depth = 0);
private:
const BufferAllocation::Slice condition_result_buffer_index_;
std::unique_ptr<SequentialThunk> condition_thunk_sequence_;
std::unique_ptr<SequentialThunk> body_thunk_sequence_;
std::optional<int64_t> trip_count_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*,
std::unique_ptr<se::MemoryAllocation>>
predicates_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/while_thunk.h"
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
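// Tracks the current iteration of each in-flight while loop on this thread,
// with the front of the list being the innermost loop. This backs
// WhileThunk::CurrentLoopIteration(depth).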
static std::list<int64_t>& LoopCounters() {
static thread_local std::list<int64_t> loop_counters;
return loop_counters;
}
absl::StatusOr<int64_t> WhileThunk::CurrentLoopIteration(int64_t depth) {
  if (depth < 0 || depth >= static_cast<int64_t>(LoopCounters().size())) {
return absl::InvalidArgumentError(absl::StrFormat(
"Loop depth %d is greater than the number of tracked loops %d", depth,
LoopCounters().size()));
}
auto counter = LoopCounters().begin();
std::advance(counter, depth);
return *counter;
}
WhileThunk::WhileThunk(
ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count)
: Thunk(Kind::kWhile, thunk_info),
condition_result_buffer_index_(condition_result_buffer_index),
condition_thunk_sequence_(std::move(condition_thunk_sequence)),
body_thunk_sequence_(std::move(body_thunk_sequence)),
trip_count_(trip_count) {}
absl::Status WhileThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
TF_RETURN_IF_ERROR(
condition_thunk_sequence_->Prepare(params, resource_requests));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Prepare(params, resource_requests));
return absl::OkStatus();
}
absl::Status WhileThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(condition_thunk_sequence_->Initialize(params));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(sizeof(bool)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
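// Runs the loop. The iteration counter is pushed onto the thread-local stack
// for the duration of the call so nested thunks can query it via
// CurrentLoopIteration.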
absl::Status WhileThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
int64_t& iter = LoopCounters().emplace_front();
absl::Cleanup cleanup = [&] { LoopCounters().pop_front(); };
se::DeviceMemoryBase condition_result_data =
params.buffer_allocations->GetDeviceAddress(
condition_result_buffer_index_);
if (trip_count_.has_value()) {
VLOG(2) << "Executing WhileThunk for " << *trip_count_ << " iterations";
    for (iter = 0; iter < *trip_count_; ++iter) {
VLOG(3) << "Executing iteration # " << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
}
return absl::OkStatus();
}
bool* condition_result = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<bool*>(predicates_.at(stream.parent())->opaque());
}();
while (true) {
VLOG(3) << "Executing WhileThunk condition computation; iter=" << iter;
TF_RETURN_IF_ERROR(condition_thunk_sequence_->ExecuteOnStream(params));
TF_RETURN_IF_ERROR(
stream.Memcpy(condition_result, condition_result_data, sizeof(bool)));
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return absl::InternalError(absl::StrFormat(
"Failed to complete all kernels launched on stream %p: %s", &stream,
blocked.message()));
}
VLOG(3) << "condition_result = " << *condition_result;
if (!*condition_result) {
VLOG(3) << "Break WhileThunk loop; iter=" << iter;
break;
}
VLOG(3) << "Executing WhileThunk body computation; iter=" << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
++iter;
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/while_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/cpu/runtime/thunk_testlib.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(WhileThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice predicate_slice(&alloc, 0, sizeof(int32_t));
BufferAllocation::Slice cond_read_slice(&alloc, 10, 10);
BufferAllocation::Slice body_read_slice(&alloc, 20, 10);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(cond_read_slice)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(body_read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, predicate_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->buffer_uses().size(), 3);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(predicate_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(cond_read_slice));
EXPECT_EQ(thunk->buffer_uses()[2], BufferUse::Read(body_read_slice));
}
}
} | 2,027 |
#ifndef XLA_SERVICE_CPU_RUNTIME_THUNK_EXECUTOR_H_
#define XLA_SERVICE_CPU_RUNTIME_THUNK_EXECUTOR_H_
#include <atomic>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
namespace xla::cpu {
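// ThunkExecutor runs a ThunkSequence as a DAG: thunks whose buffer uses
// conflict are ordered by edges, and independent thunks may run concurrently
// via an optional TaskRunner. If the DAG degenerates to a chain, a cheaper
// purely sequential execution path is used instead.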
class ThunkExecutor {
public:
using BufferUses = Thunk::BufferUses;
using ExecuteEvent = Thunk::ExecuteEvent;
using Task = absl::AnyInvocable<void()>;
using TaskRunner = absl::AnyInvocable<void(Task)>;
using NodeId = int64_t;
static constexpr NodeId kInvalidNodeId = std::numeric_limits<NodeId>::min();
ThunkExecutor(ThunkExecutor&&) = default;
ThunkExecutor& operator=(ThunkExecutor&&) = default;
static absl::StatusOr<ThunkExecutor> Create(ThunkSequence thunk_sequence);
struct NodeDef {
NodeId id = kInvalidNodeId;
std::vector<NodeId> in_edges;
std::vector<NodeId> out_edges;
};
tsl::AsyncValueRef<ExecuteEvent> Execute(const Thunk::ExecuteParams& params,
TaskRunner runner = nullptr);
absl::Span<const NodeDef> nodes_defs() const { return nodes_defs_; }
const NodeDef& node_def(NodeId id) const { return nodes_defs_[id]; }
absl::Span<const NodeId> source() const { return source_; }
absl::Span<const NodeId> sink() const { return sink_; }
BufferUses buffer_uses() const { return thunk_sequence_.buffer_uses(); }
std::string ToString() const;
bool is_sequential() const { return is_sequential_; }
private:
using ReadyQueue = absl::InlinedVector<NodeId, 8>;
ThunkExecutor(ThunkSequence thunk_sequence, std::vector<NodeDef> nodes_defs);
struct Node {
NodeId id = kInvalidNodeId;
std::atomic<int64_t>* counter = nullptr;
const std::vector<NodeId>* out_edges = nullptr;
};
struct ExecuteState {
ExecuteState(ThunkExecutor* executor, TaskRunner runner);
ThunkExecutor* executor;
TaskRunner runner;
absl::FixedArray<std::atomic<int64_t>> counters;
absl::InlinedVector<Node, 32> nodes;
std::atomic<bool> abort;
absl::Mutex abort_mutex;
absl::Status abort_status ABSL_GUARDED_BY(abort_mutex);
std::atomic<int64_t> pending_sink_nodes;
tsl::AsyncValueRef<ExecuteEvent> execute_event;
};
tsl::AsyncValueRef<ExecuteEvent> ExecuteSequential(
const Thunk::ExecuteParams& params);
void ResumeExecuteSequential(int64_t index,
const Thunk::ExecuteParams& params,
tsl::AsyncValueRef<ExecuteEvent> event);
void Execute(ExecuteState* state, const Thunk::ExecuteParams& params,
ReadyQueue ready_queue);
void ProcessOutEdges(ExecuteState* state,
tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
Node& node, ReadyQueue& ready_queue);
int64_t TransitiveReduction();
ThunkSequence thunk_sequence_;
std::vector<NodeDef> nodes_defs_;
std::vector<NodeId> source_;
std::vector<NodeId> sink_;
bool is_sequential_;
};
}
#endif
#include "xla/service/cpu/runtime/thunk_executor.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
ThunkExecutor::ThunkExecutor(ThunkSequence thunk_sequence,
std::vector<NodeDef> nodes_defs)
: thunk_sequence_(std::move(thunk_sequence)),
nodes_defs_(std::move(nodes_defs)),
is_sequential_(true) {
for (NodeId i = 0; i < nodes_defs_.size(); ++i) {
if (nodes_defs_[i].in_edges.empty()) {
source_.push_back(i);
}
if (nodes_defs_[i].out_edges.empty()) {
sink_.push_back(i);
}
}
int64_t num_erased_edges = TransitiveReduction();
for (NodeId i = 1; i < nodes_defs_.size() && is_sequential_; ++i) {
is_sequential_ &= (absl::c_count(nodes_defs_[i].in_edges, i - 1) != 0);
}
VLOG(2) << absl::StreamFormat(
"Constructed ThunkExecutor with %d nodes: #source_nodes=%d "
"#sink_nodes=%d, #erased_edges=%d, is_sequential=%v",
nodes_defs_.size(), source_.size(), sink_.size(), num_erased_edges,
is_sequential_);
DCHECK((!source_.empty() && !sink_.empty() && !thunk_sequence_.empty()) ||
(source_.empty() && sink_.empty() && thunk_sequence_.empty()));
}
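// Builds the dependency graph: an earlier thunk `j` gets an edge to a later
// thunk `i` whenever their read/write sets conflict, so conflicting thunks
// execute in their original sequence order.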
absl::StatusOr<ThunkExecutor> ThunkExecutor::Create(
ThunkSequence thunk_sequence) {
std::vector<NodeDef> defs(thunk_sequence.size());
std::vector<BufferUse::ReadWriteSet> rwsets(thunk_sequence.size());
std::vector<Thunk::BufferUses> buffer_uses(thunk_sequence.size());
for (NodeId i = 0; i < thunk_sequence.size(); ++i) {
defs[i].id = i;
Thunk& thunk = *thunk_sequence[i];
rwsets[i].AddAll(thunk.buffer_uses());
for (NodeId j = i - 1; j >= 0; --j) {
if (rwsets[j].HasConflicts(rwsets[i])) {
defs[j].out_edges.push_back(i);
defs[i].in_edges.push_back(j);
}
}
}
return ThunkExecutor(std::move(thunk_sequence), std::move(defs));
}
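// Per-execution state: each node's atomic counter starts at its in-degree
// and is decremented as predecessors finish; a node becomes ready at zero.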
ThunkExecutor::ExecuteState::ExecuteState(ThunkExecutor* executor,
TaskRunner runner)
: executor(executor),
runner(std::move(runner)),
counters(executor->nodes_defs().size()),
nodes(executor->nodes_defs().size()),
abort(false),
pending_sink_nodes(executor->sink().size()),
execute_event(tsl::MakeConstructedAsyncValueRef<ExecuteEvent>()) {
for (NodeId id = 0; id < nodes.size(); ++id) {
const NodeDef& node_def = executor->node_def(id);
counters[id].store(node_def.in_edges.size(), std::memory_order_release);
nodes[id] = Node{id, &counters[id], &node_def.out_edges};
}
}
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent> ThunkExecutor::Execute(
const Thunk::ExecuteParams& params, TaskRunner runner) {
if (ABSL_PREDICT_FALSE(thunk_sequence_.empty())) {
return Thunk::OkExecuteEvent();
}
if (ABSL_PREDICT_FALSE(thunk_sequence_.size() == 1)) {
return thunk_sequence_[0]->Execute(params);
}
if (is_sequential_) {
return ExecuteSequential(params);
}
auto state = std::make_unique<ExecuteState>(this, std::move(runner));
Execute(state.get(), params, ReadyQueue(source_.begin(), source_.end()));
auto execute_event = state->execute_event;
execute_event.AndThen([state = std::move(state)] {
CHECK_EQ(state->pending_sink_nodes.load(std::memory_order_acquire), 0)
<< "All sink nodes must be completed before execute_event is marked "
"available.";
});
return execute_event;
}
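// Executes thunks one after another, switching to the asynchronous
// ResumeExecuteSequential continuation the first time a thunk returns an
// event that is not immediately available.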
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent>
ThunkExecutor::ExecuteSequential(const Thunk::ExecuteParams& params) {
for (int64_t i = 0; i < thunk_sequence_.size(); ++i) {
Thunk& thunk = *thunk_sequence_[i];
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
execute_event.AndThen([this, ¶ms, i, event](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(i + 1, params, std::move(event));
}
});
return event;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
return execute_event;
}
}
return Thunk::OkExecuteEvent();
}
void ThunkExecutor::ResumeExecuteSequential(
int64_t index, const Thunk::ExecuteParams& params,
tsl::AsyncValueRef<ExecuteEvent> event) {
for (int64_t i = index; i < thunk_sequence_.size(); ++i) {
Thunk& thunk = *thunk_sequence_[i];
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
execute_event.AndThen(
[this, ¶ms, i, event = std::move(event)](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(i + 1, params, std::move(event));
}
});
return;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
event.SetError(execute_event.GetError());
return;
}
}
event.SetStateConcrete();
}
void ThunkExecutor::Execute(ExecuteState* state,
const Thunk::ExecuteParams& params,
ReadyQueue ready_queue) {
tsl::profiler::TraceMe trace("ThunkExecutor::Execute");
if (ready_queue.empty()) return;
bool has_runner = state->runner != nullptr;
for (int64_t i = 0; i < ready_queue.size(); ++i) {
NodeId id = ready_queue[i];
Node& node = state->nodes[id];
int64_t cnt = node.counter->load(std::memory_order_acquire);
CHECK_EQ(cnt, 0) << "Node counter must be 0";
if (has_runner && i < ready_queue.size() - 1) {
ReadyQueue tail(ready_queue.begin() + i + 1, ready_queue.end());
ready_queue.erase(ready_queue.begin() + i + 1, ready_queue.end());
state->runner([¶ms, state, tail = std::move(tail)]() mutable {
state->executor->Execute(state, params, std::move(tail));
});
}
Thunk& thunk = *state->executor->thunk_sequence_[id];
auto execute_event = state->abort.load(std::memory_order_relaxed)
? Thunk::OkExecuteEvent()
: thunk.Execute(params);
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
execute_event.AndThen([&, state, execute_event = execute_event.AsPtr()] {
ReadyQueue ready_queue;
ProcessOutEdges(state, execute_event, node, ready_queue);
Execute(state, params, std::move(ready_queue));
});
} else {
ProcessOutEdges(state, execute_event.AsPtr(), node, ready_queue);
}
}
}
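// Propagates completion of `node`: decrements successor counters, collects
// newly ready nodes, and, for sink nodes, resolves the overall execute event
// once the last sink finishes (with the first recorded error, if any).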
void ThunkExecutor::ProcessOutEdges(
ExecuteState* state, tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
Node& node, ReadyQueue& ready_queue) {
if (ABSL_PREDICT_FALSE(node_event.IsError())) {
absl::MutexLock lock(&state->abort_mutex);
state->abort = true;
state->abort_status.Update(node_event.GetError());
}
bool is_sink = node.out_edges->empty();
for (NodeId out_edge : *node.out_edges) {
Node& out_node = state->nodes[out_edge];
int64_t cnt = out_node.counter->fetch_sub(1, std::memory_order_release);
CHECK_GE(cnt, 1) << "Node counter can't drop below 0";
if (cnt == 1) ready_queue.push_back(out_edge);
}
if (ABSL_PREDICT_FALSE(is_sink)) {
bool is_done =
state->pending_sink_nodes.fetch_sub(1, std::memory_order_acq_rel) == 1;
if (ABSL_PREDICT_TRUE(!is_done)) return;
if (ABSL_PREDICT_FALSE(state->abort.load(std::memory_order_relaxed))) {
auto take_error = [&] {
absl::MutexLock lock(&state->abort_mutex);
CHECK(!state->abort_status.ok())
<< "Abort status must be set if execution is aborted";
return std::move(state->abort_status);
};
state->execute_event.SetError(take_error());
} else {
state->execute_event.SetStateConcrete();
}
}
}
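// Removes edges implied by longer paths: a DFS from each node's successors
// erases direct edges to any transitively reachable node, which reduces the
// number of counter updates at execution time.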
int64_t ThunkExecutor::TransitiveReduction() {
int64_t num_erased_edges = 0;
auto erase_edge = [&](NodeDef& from, NodeDef& to) {
auto out_edge_it = absl::c_find(from.out_edges, to.id);
auto in_edge_it = absl::c_find(to.in_edges, from.id);
bool has_out_edge = out_edge_it != from.out_edges.end();
bool has_in_edge = in_edge_it != to.in_edges.end();
DCHECK_EQ(has_out_edge, has_in_edge) << "Edges must be symmetric";
if (has_out_edge && has_in_edge) {
from.out_edges.erase(out_edge_it);
to.in_edges.erase(in_edge_it);
++num_erased_edges;
}
};
std::vector<int64_t> stack;
std::vector<bool> visited;
auto add_to_stack = [&](int64_t node_id) {
if (!visited[node_id]) {
stack.push_back(node_id);
visited[node_id] = true;
}
};
for (int64_t i = 0; i < nodes_defs_.size(); ++i) {
NodeDef& source_node = nodes_defs_[i];
stack.clear();
visited.assign(nodes_defs_.size(), false);
for (int64_t out_id : source_node.out_edges) {
NodeDef& out_node = nodes_defs_[out_id];
for (int64_t start_id : out_node.out_edges) add_to_stack(start_id);
}
while (!stack.empty()) {
int64_t node_id = stack.back();
stack.pop_back();
NodeDef& node = nodes_defs_[node_id];
erase_edge(source_node, node);
for (int64_t out_id : node.out_edges) add_to_stack(out_id);
}
}
return num_erased_edges;
}
std::string ThunkExecutor::ToString() const {
std::string str = absl::StrFormat(
"ThunkExecutor: #thunks=%d #source_nodes=%d #sink_nodes=%d",
thunk_sequence_.size(), source_.size(), sink_.size());
std::vector<std::vector<std::string>> in_edges(thunk_sequence_.size());
for (const auto& node_def : nodes_defs_) {
for (NodeId in_edge : node_def.in_edges) {
in_edges[node_def.id].push_back(thunk_sequence_[in_edge]->info().op_name);
}
}
for (NodeId i = 0; i < thunk_sequence_.size(); ++i) {
const Thunk& thunk = *thunk_sequence_[i];
bool is_source = absl::c_find(source_, i) != source_.end();
bool is_sink = absl::c_find(sink_, i) != sink_.end();
absl::StrAppendFormat(
&str,
"\n thunk #%05d: op_name=%s, dependencies=[%s], source=%v, sink=%v", i,
thunk.info().op_name, absl::StrJoin(in_edges[i], ", "), is_source,
is_sink);
}
return str;
}
} | #include "xla/service/cpu/runtime/thunk_executor.h"
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/task.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
using ::testing::ElementsAre;
class AddI32Thunk final : public Thunk {
public:
AddI32Thunk(std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace, bool inject_error,
bool inject_side_effect);
static std::unique_ptr<Thunk> Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace = nullptr, bool inject_error = false,
bool inject_side_effect = false);
static std::vector<MaybeOwningDeviceMemory> AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data);
static absl::Status Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice);
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams&) final;
BufferUses buffer_uses() const final;
private:
std::vector<BufferAllocation::Slice> srcs_;
std::vector<BufferAllocation::Slice> dsts_;
std::vector<std::string>* trace_;
bool inject_error_;
bool inject_side_effect_;
};
std::unique_ptr<Thunk> AddI32Thunk::Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts, std::vector<std::string>* trace,
bool inject_error, bool inject_side_effect) {
return std::make_unique<AddI32Thunk>(std::move(name), std::move(srcs),
std::move(dsts), trace, inject_error,
inject_side_effect);
}
std::vector<MaybeOwningDeviceMemory> AddI32Thunk::AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data) {
std::vector<MaybeOwningDeviceMemory> buffers;
for (auto& vec : data) {
buffers.emplace_back(
se::DeviceMemoryBase(vec->data(), vec->size() * sizeof(int32_t)));
}
return buffers;
}
AddI32Thunk::AddI32Thunk(std::string name,
std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace, bool inject_error,
bool inject_side_effect)
: Thunk(Kind::kKernel, Info{name}),
srcs_(std::move(srcs)),
dsts_(std::move(dsts)),
trace_(trace),
inject_error_(inject_error),
inject_side_effect_(inject_side_effect) {}
absl::Status AddI32Thunk::Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase src,
allocations->GetDeviceAddress(src_slice));
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase dst,
allocations->GetDeviceAddress(dst_slice));
CHECK_EQ(src.size() % sizeof(int32_t), 0);
CHECK_EQ(dst.size() % sizeof(int32_t), 0);
int32_t* src_ptr = static_cast<int32_t*>(src.opaque());
int32_t* dst_ptr = static_cast<int32_t*>(dst.opaque());
size_t len = std::min(src.size(), dst.size()) / sizeof(int32_t);
for (int j = 0; j < len; ++j) dst_ptr[j] += src_ptr[j];
return absl::OkStatus();
}
tsl::AsyncValueRef<Thunk::ExecuteEvent> AddI32Thunk::Execute(
const ExecuteParams& params) {
if (trace_) trace_->push_back(info().op_name);
auto execute = [&]() -> absl::Status {
CHECK_EQ(srcs_.size(), dsts_.size());
for (int i = 0; i < srcs_.size(); ++i) {
TF_RETURN_IF_ERROR(
Execute(params.buffer_allocations, srcs_.at(i), dsts_.at(i)));
}
return absl::OkStatus();
};
if (params.intra_op_threadpool) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
params.intra_op_threadpool->getPool()->Schedule([&, event, execute] {
if (inject_error_) {
event.SetError(absl::InternalError("Injected error"));
} else {
CHECK_OK(execute());
event.SetStateConcrete();
}
});
return event;
}
if (inject_error_) {
return tsl::MakeErrorAsyncValueRef(absl::InternalError("Injected error"));
}
TF_RETURN_IF_ERROR(execute());
return Thunk::OkExecuteEvent();
}
AddI32Thunk::BufferUses AddI32Thunk::buffer_uses() const {
BufferUses buffer_uses;
for (const auto& src : srcs_) buffer_uses.push_back(BufferUse::Read(src));
for (const auto& dst : dsts_) buffer_uses.push_back(BufferUse::Write(dst));
if (inject_side_effect_) {
static auto* fake_alloc = new BufferAllocation(0, 1, 0);
buffer_uses.push_back(
BufferUse::Write(BufferAllocation::Slice(fake_alloc, 0, 1)));
}
return buffer_uses;
}
TEST(ThunkExecutorTest, DependencyOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_FALSE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0, 1));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, SequentialOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_TRUE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, TransitiveReduction) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_THAT(executor.node_def(0).out_edges, ElementsAre(1));
EXPECT_THAT(executor.node_def(1).in_edges, ElementsAre(0));
EXPECT_THAT(executor.node_def(1).out_edges, ElementsAre(2));
EXPECT_THAT(executor.node_def(2).in_edges, ElementsAre(1));
}
TEST(ThunkExecutorTest, Execute) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
std::vector<std::string> trace;
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}, &trace));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}, &trace));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}, &trace));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
std::vector<int32_t> data(20, 1);
auto buffers = AddI32Thunk::AsDeviceMemory({&data});
BufferAllocations allocations(buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = executor.Execute(params, [&](ThunkExecutor::Task task) {
trace.push_back("<TaskRunner>");
task();
});
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_THAT(trace, ElementsAre("<TaskRunner>", "b", "a", "c"));
EXPECT_THAT(data, ElementsAre(2, 2, 2, 2, 2,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
2, 2, 2, 2, 2));
}
struct GeneratedThunkSequence {
BufferAllocation src_alloc;
BufferAllocation dst_alloc;
std::vector<int32_t> src;
std::vector<int32_t> dst;
std::vector<int32_t> expected;
std::vector<MaybeOwningDeviceMemory> expected_buffers;
std::vector<MaybeOwningDeviceMemory> buffers;
ThunkSequence sequence;
};
static absl::StatusOr<std::unique_ptr<GeneratedThunkSequence>>
GenerateThunkSequence(size_t num_elements, size_t num_thunks,
bool inject_errors, bool inject_side_effects) {
auto g = std::make_unique<GeneratedThunkSequence>(GeneratedThunkSequence{
BufferAllocation(0, num_elements * sizeof(int32_t), 0),
BufferAllocation(1, num_elements * sizeof(int32_t), 0),
std::vector<int32_t>(num_elements, 1),
std::vector<int32_t>(num_elements, 0),
std::vector<int32_t>(num_elements, 0),
});
g->expected_buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->expected});
g->buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->dst});
std::minstd_rand0 engine;
std::uniform_int_distribution<size_t> offset_dist(0, num_elements - 1);
std::uniform_int_distribution<size_t> size_dist(32, 64);
std::uniform_int_distribution<size_t> inject_error_dist(0, num_thunks / 10);
auto random_slice = [&](BufferAllocation* alloc) {
size_t start = offset_dist(engine);
size_t size = std::min(num_elements - start, size_dist(engine));
return BufferAllocation::Slice(alloc, start * sizeof(int32_t),
size * sizeof(int32_t));
};
for (int i = 0; i < num_thunks; ++i) {
BufferAllocation::Slice src = random_slice(&g->src_alloc);
BufferAllocation::Slice dst = random_slice(&g->dst_alloc);
BufferAllocations allocations(g->expected_buffers);
TF_RETURN_IF_ERROR(AddI32Thunk::Execute(&allocations, src, dst));
bool inject_error = inject_errors && inject_error_dist(engine) == 0;
g->sequence.push_back(AddI32Thunk::Create(absl::StrCat(i), {src}, {dst},
nullptr, inject_error,
inject_side_effects));
}
return g;
}
class ThunkExecutorStressTest
: public testing::TestWithParam<
std::tuple<int32_t, bool, bool, bool, bool>> {
public:
void SetUp() override {
auto& [_, use_task_runner, use_device, inject_errors, inject_side_effects] =
GetParam();
use_task_runner_ = use_task_runner;
use_device_ = use_device;
if (use_task_runner_ || use_device_) {
thread_pool_.emplace(tsl::Env::Default(), "thunk-executor", 8);
device_.emplace(thread_pool_->AsEigenThreadPool(),
thread_pool_->NumThreads());
}
}
ThunkExecutor::TaskRunner task_runner() {
if (!use_task_runner_) return nullptr;
return [&](ThunkExecutor::Task task) {
thread_pool_->Schedule(ToCopyableTask(std::move(task)));
};
}
Eigen::ThreadPoolDevice* device() {
if (!use_device_) return nullptr;
return &*device_;
}
private:
bool use_task_runner_;
bool use_device_;
std::optional<tsl::thread::ThreadPool> thread_pool_;
std::optional<Eigen::ThreadPoolDevice> device_;
};
TEST_P(ThunkExecutorStressTest, Execute) {
auto [num_thunks, use_task_runner, use_device, inject_errors,
inject_side_effects] = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GeneratedThunkSequence> g,
GenerateThunkSequence(1024, num_thunks, inject_errors,
inject_side_effects));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(g->sequence)));
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, device()};
auto execute_event = executor.Execute(params, task_runner());
tsl::BlockUntilReady(execute_event);
if (inject_errors) {
ASSERT_TRUE(execute_event.IsError());
EXPECT_EQ(execute_event.GetError(), absl::InternalError("Injected error"));
} else {
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_EQ(g->dst, g->expected);
}
}
INSTANTIATE_TEST_SUITE_P(ThunkExecutor, ThunkExecutorStressTest,
testing::Combine(testing::ValuesIn({10, 100, 1000}),
testing::Bool(), testing::Bool(),
testing::Bool(), testing::Bool()));
static void BM_SyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
  auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                 /*inject_errors=*/false,
                                 /*inject_side_effects=*/false)
               .value();
auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
for (auto _ : state) {
auto execute_event = e.Execute(params, nullptr);
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
static void BM_AsyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "thunk-executor", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
  auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                 /*inject_errors=*/false,
                                 /*inject_side_effects=*/false)
               .value();
auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, &device};
for (auto _ : state) {
auto execute_event = e.Execute(params, [&](ThunkExecutor::Task task) {
thread_pool.Schedule(ToCopyableTask(std::move(task)));
});
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
BENCHMARK(BM_SyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
    ->Arg(256)
->Arg(512);
BENCHMARK(BM_AsyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
    ->Arg(256)
->Arg(512);
}
} | 2,028 |
#ifndef XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
namespace xla {
namespace gpu {
class GpuExecutable;
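// KernelThunk launches a compiled device kernel with the given launch
// dimensions and shared memory size. Kernels are loaded once per
// StreamExecutor and cached under a mutex.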
class KernelThunk : public Thunk {
public:
KernelThunk(const HloInstruction* instr, std::string kernel_name,
absl::Span<const KernelArgument> kernel_arguments,
LaunchDimensions launch_dimensions,
std::optional<se::ClusterDim> cluster_dim, int64_t shmem_bytes);
KernelThunk(const KernelThunk&) = delete;
KernelThunk& operator=(const KernelThunk&) = delete;
~KernelThunk() override = default;
std::string ToString(int indent) const override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const std::vector<BufferAllocation::Slice>& arguments() const {
return args_;
}
const std::vector<bool>& written() const { return written_; }
const std::string& kernel_name() const { return kernel_name_; }
const LaunchDimensions& launch_dimensions() const {
return launch_dimensions_;
}
int64_t shmem_bytes() const { return shmem_bytes_; }
private:
std::vector<BufferAllocation::Slice> args_;
std::vector<bool> written_;
const std::string kernel_name_;
const LaunchDimensions launch_dimensions_;
const std::optional<se::ClusterDim> cluster_dim_;
int64_t shmem_bytes_;
mutable absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
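// CustomKernelThunk launches a hand-written CustomKernel (e.g. from the
// custom kernel library) instead of a kernel compiled from the HLO module.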
class CustomKernelThunk : public Thunk {
public:
CustomKernelThunk(const HloInstruction* inst, CustomKernel custom_kernel,
absl::Span<const KernelArgument> kernel_arguments);
std::string ToString(int indent) const override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
const CustomKernel& custom_kernel() const { return custom_kernel_; }
const std::vector<BufferAllocation::Slice>& arguments() const {
return args_;
}
std::string_view custom_kernel_name() const { return custom_kernel_.name(); }
const std::vector<bool>& written() const { return written_; }
LaunchDimensions launch_dimensions() const {
return LaunchDimensions(custom_kernel_.block_dims(),
custom_kernel_.thread_dims());
}
int64_t shmem_bytes() const { return custom_kernel_.shared_memory_bytes(); }
private:
std::vector<BufferAllocation::Slice> args_;
std::vector<bool> written_;
CustomKernel custom_kernel_;
mutable absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
KernelThunk::KernelThunk(const HloInstruction* instr, std::string kernel_name,
absl::Span<const KernelArgument> kernel_arguments,
LaunchDimensions launch_dimensions,
std::optional<se::ClusterDim> cluster_dim,
int64_t shmem_bytes)
: Thunk(Kind::kKernel, Thunk::ThunkInfo::WithProfileAnnotation(instr)),
kernel_name_(std::move(kernel_name)),
launch_dimensions_(std::move(launch_dimensions)),
cluster_dim_(std::move(cluster_dim)),
shmem_bytes_(shmem_bytes) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string KernelThunk::ToString(int indent) const {
return absl::StrFormat(
", kernel = %s, launch dimensions = %s, cluster_dim = %s", kernel_name_,
launch_dimensions_.ToString(),
cluster_dim_.has_value() ? cluster_dim_->ToString() : "nullopt");
}
absl::Status KernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
CreateKernel(kernel_name_, args_.size(), params.src.text,
params.src.binary, params.executor, shmem_bytes_));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
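// Debugging helper that copies every kernel argument back to the host and
// logs its contents as hex bytes; only invoked when VLOG(100) is enabled.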
static void PrintBufferContents(
se::Stream* stream, absl::Span<const se::DeviceMemoryBase> buffer_args) {
int input_idx = 0;
for (const se::DeviceMemoryBase& buf : buffer_args) {
auto host_buffer = std::make_unique<char[]>(buf.size());
CHECK_OK(stream->Memcpy(host_buffer.get(), buf, buf.size()));
CHECK_OK(stream->BlockHostUntilDone());
std::string buffer_contents;
for (int i = 0; i < buf.size(); i++) {
absl::StrAppendFormat(&buffer_contents, "%x ",
static_cast<unsigned>(host_buffer[i]));
}
VLOG(100) << "BUF(" << input_idx++ << ") = " << buffer_contents;
}
}
absl::Status KernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
LaunchDimensions launch_dimensions;
std::optional<se::ClusterDim> cluster_dim;
const se::Kernel* kernel = nullptr;
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
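  // Snapshot the cached kernel and launch configuration under the lock so the
  // actual launch below does not hold the mutex.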
{
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(executor);
CHECK(it != kernel_cache_.end())
<< "Initialize() not called for StreamExecutor " << executor;
launch_dimensions = launch_dimensions_;
cluster_dim = cluster_dim_;
kernel = it->second.get();
}
VLOG(3) << "Launching " << kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(stream, buffer_args);
}
if (cluster_dim.has_value()) {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
cluster_dim.value(), stream);
} else {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
stream);
}
}
CustomKernelThunk::CustomKernelThunk(
const HloInstruction* instr, CustomKernel custom_kernel,
absl::Span<const KernelArgument> kernel_arguments)
: Thunk(Kind::kCustomKernel,
Thunk::ThunkInfo::WithProfileAnnotation(instr)),
custom_kernel_(std::move(custom_kernel)) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string CustomKernelThunk::ToString(int indent) const {
return custom_kernel_.ToString();
}
absl::Status CustomKernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
se::KernelFactory::Create(
params.executor, custom_kernel_.kernel_spec()));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
absl::Status CustomKernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
const se::Kernel* kernel = [&] {
absl::MutexLock lock(&mutex_);
return kernel_cache_[executor].get();
}();
VLOG(3) << "Launching " << custom_kernel_.ToString() << " as device kernel "
<< kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(params.stream, buffer_args);
}
se::KernelArgsDeviceMemoryArray args(buffer_args,
custom_kernel_.shared_memory_bytes());
if (auto cluster = custom_kernel_.cluster_dims(); cluster.has_value()) {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *cluster, *kernel,
args);
} else {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *kernel, args);
}
}
}
} | #include "xla/service/cpu/runtime/kernel_thunk.h"
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
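// Test-only kernel registry: resolves every kernel name to a kernel that
// writes in[i] + in[i] (i.e. 2 * in[i]) to out[i], where i is the thread's
// x coordinate.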
class AddF32HostKernels : public Thunk::HostKernels {
public:
absl::StatusOr<SE_HOST_Kernel*> Find(std::string_view name) override {
return +[](const SE_HOST_KernelCallFrame* call_frame) {
const SE_HOST_KernelArg& in = call_frame->args[0];
const SE_HOST_KernelArg& out = call_frame->args[1];
float* in_ptr = reinterpret_cast<float*>(in.data);
float* out_ptr = reinterpret_cast<float*>(out.data);
uint64_t i = call_frame->thread->x;
*(out_ptr + i) = *(in_ptr + i) + *(in_ptr + i);
return static_cast<SE_HOST_KernelError*>(nullptr);
};
}
};
TEST(KernelThunkTest, CheckAlignment) {
auto thunk = KernelThunk::Create({"test"}, {}, {}, "test", se::ThreadDim(),
3);
EXPECT_TRUE(absl::StrContains(thunk.status().message(),
"minimum alignment 3 is not a power of 2"));
}
TEST(KernelThunkTest, AddF32) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
std::vector<float> out(4, 0.0);
size_t size_in_bytes = in.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_alloc(0, size_in_bytes, 0);
BufferAllocation out_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice},
"add_f32", se::ThreadDim(4)));
AddF32HostKernels host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
EXPECT_EQ(out, expected);
}
}
} | 2,029 |
#ifndef XLA_SERVICE_GPU_RUNTIME_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_THUNK_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/Operation.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/lib/gtl/int_type.h"
namespace xla {
namespace gpu {
TSL_LIB_GTL_DEFINE_INT_TYPE(ExecutionStreamId, uint64_t);
class Thunk {
public:
using ExecutionStreamIdMap =
absl::flat_hash_map<ExecutionStreamId, se::Stream*>;
static constexpr auto kDefaultExecutionStreamId = ExecutionStreamId(0);
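  // Each concrete thunk is identified by one of these kinds; keep in sync
  // with KindToString() in the .cc file.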
enum Kind {
kAddressComputation,
kCholesky,
kConditional,
kConvolution,
kConvolutionReorder,
kCopy,
kCopyDone,
kCommandBuffer,
kCubSort,
kCublasLtMatmul,
kCustomCall,
kCustomKernel,
kFft,
kGemm,
kInfeed,
kKernel,
kMemset32BitValue,
kMemzero,
kNcclAllGather,
kNcclAllGatherStart,
kNcclAllGatherDone,
kNcclAllReduce,
kNcclAllReduceStart,
kNcclAllReduceDone,
kNcclCollectiveBroadcast,
kNcclCollectiveBroadcastStart,
kNcclCollectiveBroadcastDone,
kNcclCollectivePermute,
kNcclCollectivePermuteStart,
kNcclCollectivePermuteDone,
kNcclReduceScatter,
kNcclReduceScatterStart,
kNcclReduceScatterDone,
kNcclAllToAll,
kNcclAllToAllStart,
kNcclAllToAllDone,
kNcclSend,
kNcclSendDone,
kNcclRecv,
kNcclRecvDone,
kNorm,
kOutfeed,
kPartitionId,
kRecv,
kRecvDone,
kReplicaId,
kSequential,
kSend,
kSendDone,
kTriangularSolve,
kWhile,
kFusedMHA,
kWaitForStreams,
kCuDnn
};
using BinaryMap = absl::flat_hash_map<std::string, std::string>;
struct ExecutableSource {
std::string_view text;
absl::Span<const uint8_t> binary;
BinaryMap dnn_compiled_graphs;
};
struct ThunkInfo {
ThunkInfo() = default;
static ThunkInfo WithProfileAnnotation(const HloInstruction* instr);
std::string profile_annotation;
ExecutionStreamId execution_stream_id = kDefaultExecutionStreamId;
};
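  // Interface through which thunks declare the resources they need (currently
  // NCCL cliques) during the Prepare stage, before any thunk is initialized
  // or executed.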
class ResourceRequests {
public:
virtual ~ResourceRequests() = default;
virtual absl::Status AddClique(const NcclCliqueKey& clique_key,
int32_t num_local_participants) = 0;
};
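  // Owns the NCCL cliques acquired for one execution and resolves per-rank
  // communicators for collective thunks.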
class CollectiveCliques {
public:
CollectiveCliques() = default;
explicit CollectiveCliques(NcclClique::AcquiredCliquesMap cliques_map);
absl::StatusOr<NcclApi::NcclCommHandle> GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const;
absl::StatusOr<size_t> num_communicators(
const NcclCliqueKey& clique_key) const;
absl::StatusOr<bool> is_local_clique(const NcclCliqueKey& clique_key) const;
bool empty() const { return cliques_map_.empty(); }
private:
NcclClique::AcquiredCliquesMap cliques_map_;
};
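  // Parameters shared by all collective thunks in one execution: run id,
  // device ids, the device assignment, and streams for async collectives.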
struct CollectiveExecuteParams {
static absl::StatusOr<CollectiveExecuteParams> Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams,
int64_t local_device_ordinal, int64_t collective_max_nchannels = 0,
int64_t p2p_max_nchannels = 0);
using GlobalDeviceIdMap = std::map<int32_t, GlobalDeviceId>;
se::StreamExecutor* executor;
RunId run_id;
absl::InlinedVector<se::Stream*, 4> async_streams;
int64_t local_device_ordinal;
GlobalDeviceId global_device_id;
const DeviceAssignment* device_assn;
const GlobalDeviceIdMap* global_device_id_map;
const NcclCliqueIdCallback* nccl_clique_id_callback;
int64_t collective_max_nchannels;
int64_t p2p_max_nchannels;
private:
CollectiveExecuteParams(se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams,
int64_t local_device_ordinal,
GlobalDeviceId global_device_id,
const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels,
int64_t p2p_max_nchannels);
};
struct PrepareParams {
const CollectiveExecuteParams* collective_params = nullptr;
};
struct InitializeParams {
se::StreamExecutor* executor = nullptr;
ExecutableSource src;
const BufferAllocations* buffer_allocations = nullptr;
se::Stream* stream = nullptr;
se::Stream* command_buffer_trace_stream = nullptr;
CollectiveExecuteParams* collective_params = nullptr;
CollectiveCliques* collective_cliques = nullptr;
const ffi::ExecutionContext* ffi_execution_context = nullptr;
};
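  // Parameters passed to ExecuteOnStream; constructed once per execution from
  // the service-level run options via Create().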
struct ExecuteParams {
static ExecuteParams Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams = {});
static ExecuteParams CloneWithNewAllocations(
const ExecuteParams& params,
const BufferAllocations& buffer_allocations);
const BufferAllocations* buffer_allocations;
se::Stream* stream;
se::Stream* command_buffer_trace_stream;
CollectiveExecuteParams* collective_params;
CollectiveCliques* collective_cliques;
se::Stream* device_to_host_stream;
se::Stream* host_to_device_stream;
SendDeviceMemoryFunction* send_device_memory_function;
RecvDeviceMemoryFunction* recv_device_memory_function;
const ffi::ExecutionContext* ffi_execution_context;
ExecutionStreamIdMap additional_compute_streams;
bool mock_collectives = false;
private:
friend class CommandBufferThunk;
ExecuteParams(const BufferAllocations* buffer_allocations,
se::Stream* stream, se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams = {},
bool mock_collectives = false);
};
Thunk(Kind kind, ThunkInfo thunk_info)
: kind_(kind),
profile_annotation_(thunk_info.profile_annotation),
execution_stream_id_(thunk_info.execution_stream_id) {}
virtual ~Thunk() = default;
Thunk(const Thunk&) = delete;
Thunk& operator=(const Thunk&) = delete;
virtual std::string ToString(int indent) const { return ""; }
Kind kind() const { return kind_; }
std::string_view profile_annotation() const { return profile_annotation_; }
virtual absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
return absl::OkStatus();
}
virtual absl::Status Initialize(const InitializeParams& params) {
return absl::OkStatus();
}
virtual absl::Status ExecuteOnStream(const ExecuteParams& params) = 0;
static absl::string_view KindToString(Thunk::Kind kind);
ExecutionStreamId execution_stream_id() const { return execution_stream_id_; }
void set_execution_stream_id(ExecutionStreamId execution_stream_id) {
execution_stream_id_ = execution_stream_id;
}
static absl::StatusOr<se::Stream*> GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params);
bool IsCollective() const;
private:
Kind kind_;
std::string profile_annotation_;
ExecutionStreamId execution_stream_id_;
};
using ThunkSequence = std::vector<std::unique_ptr<Thunk>>;
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind);
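// A buffer slice paired with the XLA shape of the data it holds.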
struct ShapedSlice {
BufferAllocation::Slice slice;
Shape shape;
};
bool IsReductionCollective(Thunk::Kind kind);
}
}
#endif
#include "xla/service/gpu/runtime/thunk.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "xla/translate/mhlo_to_hlo/location_exporter.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
Thunk::CollectiveCliques::CollectiveCliques(
NcclClique::AcquiredCliquesMap cliques_map)
: cliques_map_(std::move(cliques_map)) {}
absl::StatusOr<NcclApi::NcclCommHandle> Thunk::CollectiveCliques::GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
auto communicator = (*clique->second)->comm(rank);
if (!communicator.has_value()) {
return absl::InternalError(absl::StrCat("Communicator for rank ", rank,
" not found in a NCCL clique ",
clique_key.ToString()));
}
return *communicator;
}
absl::StatusOr<bool> Thunk::CollectiveCliques::is_local_clique(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->IsLocal();
}
absl::StatusOr<size_t> Thunk::CollectiveCliques::num_communicators(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->num_communicators();
}
using GlobalDeviceIdMap = Thunk::CollectiveExecuteParams::GlobalDeviceIdMap;
static absl::StatusOr<GlobalDeviceId> GetGlobalDeviceId(
const GlobalDeviceIdMap* device_id_map, int64_t local_device_ordinal) {
if (!device_id_map) return GlobalDeviceId(local_device_ordinal);
auto it = device_id_map->find(local_device_ordinal);
if (it == device_id_map->end())
return absl::NotFoundError(
absl::StrCat("No global device id found for local device ordinal: ",
local_device_ordinal));
return it->second;
}
absl::StatusOr<Thunk::CollectiveExecuteParams>
Thunk::CollectiveExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels) {
const GpuExecutableRunOptions* gpu_options =
run_options.run_options().gpu_executable_run_options();
auto* device_id_map = gpu_options && gpu_options->gpu_global_device_ids()
? &*gpu_options->gpu_global_device_ids()
: nullptr;
auto* nccl_callback = gpu_options && gpu_options->nccl_clique_id_callback()
? &gpu_options->nccl_clique_id_callback()
: nullptr;
TF_ASSIGN_OR_RETURN(GlobalDeviceId global_device_id,
GetGlobalDeviceId(device_id_map, local_device_ordinal));
return CollectiveExecuteParams(
run_options.stream()->parent(), run_options.run_options().run_id(),
async_streams, local_device_ordinal, global_device_id,
run_options.run_options().device_assignment(), device_id_map,
nccl_callback, collective_max_nchannels, p2p_max_nchannels);
}
Thunk::CollectiveExecuteParams::CollectiveExecuteParams(
se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
GlobalDeviceId global_device_id, const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels)
: executor(executor),
run_id(run_id),
async_streams(async_streams.begin(), async_streams.end()),
local_device_ordinal(local_device_ordinal),
global_device_id(global_device_id),
device_assn(device_assn),
global_device_id_map(global_device_id_map),
nccl_clique_id_callback(nccl_clique_id_callback),
collective_max_nchannels(collective_max_nchannels),
p2p_max_nchannels(p2p_max_nchannels) {}
Thunk::ExecuteParams Thunk::ExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams) {
return ExecuteParams(&buffer_allocations, stream, command_buffer_trace_stream,
collective_params, collective_cliques,
run_options.run_options().device_to_host_stream(),
run_options.run_options().host_to_device_stream(),
run_options.run_options().send_device_memory_function(),
run_options.run_options().recv_device_memory_function(),
run_options.run_options().ffi_execution_context(),
additional_compute_streams,
run_options.run_options().gpu_executable_run_options()
? run_options.run_options()
.gpu_executable_run_options()
->enable_mock_nccl_collectives()
: false);
}
Thunk::ExecuteParams Thunk::ExecuteParams::CloneWithNewAllocations(
const Thunk::ExecuteParams& params,
const BufferAllocations& buffer_allocations) {
return ExecuteParams(
&buffer_allocations, params.stream, params.command_buffer_trace_stream,
params.collective_params, params.collective_cliques,
params.device_to_host_stream, params.host_to_device_stream,
params.send_device_memory_function, params.recv_device_memory_function,
params.ffi_execution_context, params.additional_compute_streams);
}
Thunk::ExecuteParams::ExecuteParams(
const BufferAllocations* buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques, se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams, bool mock_collectives)
: buffer_allocations(buffer_allocations),
stream(stream),
command_buffer_trace_stream(command_buffer_trace_stream),
collective_params(collective_params),
collective_cliques(collective_cliques),
device_to_host_stream(device_to_host_stream),
host_to_device_stream(host_to_device_stream),
send_device_memory_function(send_device_memory_function),
recv_device_memory_function(recv_device_memory_function),
ffi_execution_context(ffi_execution_context),
additional_compute_streams(additional_compute_streams),
mock_collectives(mock_collectives) {}
absl::string_view Thunk::KindToString(Thunk::Kind kind) {
#define CASE(x) \
case Thunk::x: \
return #x
switch (kind) {
CASE(kAddressComputation);
CASE(kCholesky);
CASE(kCommandBuffer);
CASE(kConditional);
CASE(kConvolution);
CASE(kConvolutionReorder);
CASE(kCopy);
CASE(kCopyDone);
CASE(kCubSort);
CASE(kCublasLtMatmul);
CASE(kCustomCall);
CASE(kCustomKernel);
CASE(kNcclAllGather);
CASE(kNcclAllGatherStart);
CASE(kNcclAllGatherDone);
CASE(kNcclAllReduce);
CASE(kNcclAllReduceStart);
CASE(kNcclAllReduceDone);
CASE(kNcclCollectiveBroadcast);
CASE(kNcclCollectiveBroadcastStart);
CASE(kNcclCollectiveBroadcastDone);
CASE(kNcclCollectivePermute);
CASE(kNcclCollectivePermuteStart);
CASE(kNcclCollectivePermuteDone);
CASE(kNcclReduceScatter);
CASE(kNcclReduceScatterStart);
CASE(kNcclReduceScatterDone);
CASE(kNcclAllToAll);
CASE(kNcclAllToAllStart);
CASE(kNcclAllToAllDone);
CASE(kNcclSend);
CASE(kNcclSendDone);
CASE(kNcclRecv);
CASE(kNcclRecvDone);
CASE(kFft);
CASE(kGemm);
CASE(kInfeed);
CASE(kKernel);
CASE(kMemset32BitValue);
CASE(kMemzero);
CASE(kNorm);
CASE(kOutfeed);
CASE(kSend);
CASE(kSendDone);
CASE(kPartitionId);
CASE(kReplicaId);
CASE(kRecv);
CASE(kRecvDone);
CASE(kSequential);
CASE(kTriangularSolve);
CASE(kWhile);
CASE(kFusedMHA);
CASE(kWaitForStreams);
CASE(kCuDnn);
}
}
absl::StatusOr<se::Stream*> Thunk::GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params) {
if (stream_id == kDefaultExecutionStreamId) {
return params.stream;
}
auto iter = params.additional_compute_streams.find(stream_id);
if (iter == params.additional_compute_streams.end()) {
return absl::InvalidArgumentError("Invalid execution stream id.");
}
return iter->second;
}
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind) {
return os << Thunk::KindToString(kind);
}
bool IsReductionCollective(Thunk::Kind kind) {
return kind == Thunk::kNcclAllReduce || kind == Thunk::kNcclAllReduceStart ||
kind == Thunk::kNcclReduceScatter ||
kind == Thunk::kNcclReduceScatterStart;
}
Thunk::ThunkInfo Thunk::ThunkInfo::WithProfileAnnotation(
const HloInstruction* instr) {
ThunkInfo thunk_info;
thunk_info.profile_annotation = instr->name();
auto gpu_backend_config = instr->backend_config<GpuBackendConfig>();
if (gpu_backend_config.ok()) {
thunk_info.execution_stream_id =
std::max<uint64_t>(kDefaultExecutionStreamId.value(),
gpu_backend_config->operation_queue_id());
}
return thunk_info;
}
bool Thunk::IsCollective() const {
switch (kind()) {
case kNcclAllGather:
case kNcclAllGatherStart:
case kNcclAllGatherDone:
case kNcclAllReduce:
case kNcclAllReduceStart:
case kNcclAllReduceDone:
case kNcclCollectiveBroadcast:
case kNcclCollectiveBroadcastStart:
case kNcclCollectiveBroadcastDone:
case kNcclCollectivePermute:
case kNcclCollectivePermuteStart:
case kNcclCollectivePermuteDone:
case kNcclReduceScatter:
case kNcclReduceScatterStart:
case kNcclReduceScatterDone:
case kNcclAllToAll:
case kNcclAllToAllStart:
case kNcclAllToAllDone:
case kNcclSend:
case kNcclSendDone:
case kNcclRecv:
case kNcclRecvDone:
return true;
default:
return false;
}
}
}
} | #include "xla/service/cpu/runtime/thunk.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ThunkTest, OkExecuteEvent) {
auto event = Thunk::OkExecuteEvent();
ASSERT_TRUE(event.IsConcrete());
}
}
} | 2,030 |
#ifndef XLA_SERVICE_CPU_RUNTIME_LOGICAL_ID_THUNK_H_
#define XLA_SERVICE_CPU_RUNTIME_LOGICAL_ID_THUNK_H_
#include <cstdint>
#include <memory>
#include "absl/status/statusor.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/global_device_id.h"
#include "xla/tsl/concurrency/async_value_ref.h"
namespace xla::cpu {
enum class LogicalIdKind {
kPartitionId,
kReplicaId,
};
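// Thunk that writes the replica or partition id (chosen by the template
// parameter) of the executing device into a 4-byte output buffer.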
template <LogicalIdKind type>
class LogicalIdThunk : public Thunk {
public:
static absl::StatusOr<std::unique_ptr<LogicalIdThunk>> Create(
Info info, BufferAllocation::Slice logical_id_buffer);
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final;
BufferUses buffer_uses() const final;
private:
LogicalIdThunk(Info info, BufferAllocation::Slice logical_id_buffer);
absl::StatusOr<int32_t> GetIdForDevice(
const DeviceAssignment* device_assignment,
GlobalDeviceId device_id) const;
BufferAllocation::Slice logical_id_buffer_;
};
class ReplicaIdThunk final : public LogicalIdThunk<LogicalIdKind::kReplicaId> {
};
class PartitionIdThunk final
: public LogicalIdThunk<LogicalIdKind::kPartitionId> {};
}
#endif
#include "xla/service/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
static Thunk::Kind ToThunkKind(LogicalIdKind logical_id_kind) {
switch (logical_id_kind) {
case LogicalIdKind::kPartitionId:
return Thunk::Kind::kPartitionId;
case LogicalIdKind::kReplicaId:
return Thunk::Kind::kReplicaId;
}
}
template <LogicalIdKind type>
absl::StatusOr<std::unique_ptr<LogicalIdThunk<type>>>
LogicalIdThunk<type>::Create(Info info,
BufferAllocation::Slice logical_id_buffer) {
return absl::WrapUnique(
new LogicalIdThunk(std::move(info), logical_id_buffer));
}
template <LogicalIdKind type>
LogicalIdThunk<type>::LogicalIdThunk(Info info,
BufferAllocation::Slice logical_id_buffer)
: Thunk(ToThunkKind(type), info), logical_id_buffer_(logical_id_buffer) {}
template <LogicalIdKind type>
static constexpr auto ToString() {
if constexpr (type == LogicalIdKind::kPartitionId) {
return "Partition";
} else if constexpr (type == LogicalIdKind::kReplicaId) {
return "Replica";
}
}
template <LogicalIdKind type>
absl::StatusOr<int32_t> LogicalIdThunk<type>::GetIdForDevice(
const DeviceAssignment* device_assignment, GlobalDeviceId device_id) const {
if constexpr (type == LogicalIdKind::kPartitionId) {
return device_assignment->PartitionIdForDevice(device_id);
} else if constexpr (type == LogicalIdKind::kReplicaId) {
return device_assignment->ReplicaIdForDevice(device_id);
}
}
template <LogicalIdKind type>
tsl::AsyncValueRef<typename LogicalIdThunk<type>::ExecuteEvent>
LogicalIdThunk<type>::Execute(const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase logical_id_data,
params.buffer_allocations->GetDeviceAddress(logical_id_buffer_));
TF_RET_CHECK(logical_id_data.size() == sizeof(int32_t))
<< "Logical id buffer must be able to fit logical id value";
TF_RET_CHECK(params.collective_params)
<< ToString<type>() << " id requires collective params";
TF_ASSIGN_OR_RETURN(
int32_t logical_id,
GetIdForDevice(params.collective_params->device_assignment,
params.collective_params->global_device_id));
VLOG(3) << absl::StreamFormat("%s id: %d", ToString<type>(), logical_id);
VLOG(3) << absl::StreamFormat(" logical_id: slice %s (%p)",
logical_id_buffer_.ToString(),
logical_id_data.opaque());
std::memcpy(logical_id_data.opaque(), &logical_id, sizeof(int32_t));
return OkExecuteEvent();
}
template <LogicalIdKind type>
using BufferUses = typename LogicalIdThunk<type>::BufferUses;
template <LogicalIdKind type>
BufferUses<type> LogicalIdThunk<type>::buffer_uses() const {
return {BufferUse::Write(logical_id_buffer_)};
}
template class LogicalIdThunk<LogicalIdKind::kReplicaId>;
template class LogicalIdThunk<LogicalIdKind::kPartitionId>;
} | #include "xla/service/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/executable_run_options.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
absl::StatusOr<DeviceAssignment> CreateDeviceAssignment(
std::vector<std::vector<int64_t>> devices) {
const auto computation_count = devices.size();
if (devices.empty()) {
return absl::InternalError("Devices must not be empty.");
}
const auto replica_count = devices[0].size();
DeviceAssignment device_assignment(replica_count, computation_count);
for (int64_t partition = 0; partition < computation_count; ++partition) {
for (int64_t replica = 0; replica < replica_count; ++replica) {
device_assignment(replica, partition) = devices[partition][replica];
}
}
return device_assignment;
}
TEST(LogicalIdThunkTest, GetReplicaId) {
std::vector<int32_t> dst(1, -1);
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), sizeof(int32_t)));
BufferAllocation alloc(0, sizeof(int32_t), 0);
BufferAllocation::Slice id_slice(&alloc, 0,
sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kReplicaId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk, ReplicaIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0, 1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], 0);
}
TEST(LogicalIdThunkTest, GetPartitionId) {
std::vector<int32_t> dst(2, -1);
std::vector<MaybeOwningDeviceMemory> buffers;
static constexpr auto kDataSize = 2 * sizeof(int32_t);
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), kDataSize));
BufferAllocation alloc(0, kDataSize, 0);
BufferAllocation::Slice id_slice(&alloc, sizeof(int32_t),
sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kPartitionId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
PartitionIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0}, {1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], -1);
EXPECT_EQ(dst[1], 0);
}
}
} | 2,031 |
#ifndef XLA_SERVICE_GPU_RUNTIME_CONDITIONAL_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_CONDITIONAL_THUNK_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
struct ConditionalThunkConfig {
bool branch_index_is_bool;
int64_t branch_count;
std::vector<std::unique_ptr<SequentialThunk>> branch_thunks;
};
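// Thunk that reads a branch index (int32, or a bool predicate for two-branch
// conditionals) from device memory and executes the matching branch sequence.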
class ConditionalThunk : public Thunk {
public:
ConditionalThunk(ThunkInfo thunk_info, ConditionalThunkConfig config,
const BufferAllocation::Slice& branch_index_buffer_index);
ConditionalThunk(const ConditionalThunk&) = delete;
ConditionalThunk& operator=(const ConditionalThunk&) = delete;
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
absl::Span<const std::unique_ptr<SequentialThunk>> branch_thunks() const {
return config_.branch_thunks;
}
const BufferAllocation::Slice& branch_index_buffer() const {
return branch_index_buffer_index_;
}
private:
const ConditionalThunkConfig config_;
const BufferAllocation::Slice branch_index_buffer_index_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*,
std::unique_ptr<se::MemoryAllocation>>
predicates_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
ConditionalThunk::ConditionalThunk(
ThunkInfo thunk_info, ConditionalThunkConfig config,
const BufferAllocation::Slice& branch_index_buffer_index)
: Thunk(Kind::kConditional, thunk_info),
config_(std::move(config)),
branch_index_buffer_index_(branch_index_buffer_index) {}
absl::Status ConditionalThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Prepare(params, resource_requests));
}
return absl::OkStatus();
}
absl::Status ConditionalThunk::Initialize(const InitializeParams& params) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Initialize(params));
}
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(
config_.branch_index_is_bool ? sizeof(bool) : sizeof(int32_t)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
absl::Status ConditionalThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
auto branch_index_or_pred = [&]() -> std::variant<int32_t*, bool*> {
absl::MutexLock lock(&mutex_);
se::StreamExecutor* executor = stream.parent();
if (config_.branch_index_is_bool) {
return reinterpret_cast<bool*>(predicates_.at(executor)->opaque());
} else {
return reinterpret_cast<int32_t*>(predicates_.at(executor)->opaque());
}
}();
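  // Copy the branch index (or predicate) from device memory into the pinned
  // host buffer allocated in Initialize(), then wait for the copy to finish.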
se::DeviceMemoryBase branch_index_address =
params.buffer_allocations->GetDeviceAddress(branch_index_buffer_index_);
if (config_.branch_index_is_bool) {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<bool*>(branch_index_or_pred),
branch_index_address, sizeof(bool)));
} else {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<int32_t*>(branch_index_or_pred),
branch_index_address, sizeof(int32_t)));
}
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return Internal("Failed to retrieve branch_index value on stream %p: %s.",
&stream, blocked.message());
}
int32_t branch_index = std::visit(
VariantVisitor{[](int32_t* branch_index) { return *branch_index; },
[](bool* pred) { return *pred ? 0 : 1; }},
branch_index_or_pred);
if (branch_index < 0 || branch_index >= config_.branch_count) {
branch_index = config_.branch_count - 1;
}
TF_RETURN_IF_ERROR(
config_.branch_thunks[branch_index]->ExecuteOnStream(params));
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/cpu/runtime/thunk_testlib.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ConditionalThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice branch_index_slice(&alloc, 0, sizeof(int32_t));
BufferAllocation::Slice read_slice(&alloc, 10, 10);
std::vector<ThunkSequence> branch_sequences(1);
branch_sequences[0].push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, ConditionalThunk::Create({"conditional"}, branch_index_slice,
std::move(branch_sequences)));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(branch_index_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(read_slice));
}
}
} | 2,032 |
#ifndef XLA_SERVICE_GRAPHCYCLES_GRAPHCYCLES_H_
#define XLA_SERVICE_GRAPHCYCLES_GRAPHCYCLES_H_
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/types/span.h"
namespace tensorflow {
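// GraphCycles maintains a directed graph together with an incremental
// topological ordering of its nodes (a variant of the Pearce-Kelly dynamic
// topological sort), which lets InsertEdge detect and reject an edge that
// would create a cycle without rescanning the whole graph.
//
// Typical usage:
//   GraphCycles gc;
//   int32_t a = gc.NewNode();
//   int32_t b = gc.NewNode();
//   CHECK(gc.InsertEdge(a, b));   // ok: a -> b keeps the graph acyclic
//   CHECK(!gc.InsertEdge(b, a));  // rejected: b -> a would close a cycle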
class GraphCycles {
public:
GraphCycles();
~GraphCycles();
int32_t NewNode();
void RemoveNode(int32_t node);
bool InsertEdge(int32_t source_node, int32_t dest_node);
void RemoveEdge(int32_t source_node, int32_t dest_node);
bool HasEdge(int32_t source_node, int32_t dest_node) const;
std::optional<int32_t> ContractEdge(int32_t a, int32_t b);
bool CanContractEdge(int32_t a, int32_t b);
bool IsReachable(int32_t source_node, int32_t dest_node) const;
bool IsReachableNonConst(int32_t source_node, int32_t dest_node);
void *GetNodeData(int32_t node) const;
void SetNodeData(int32_t node, void *data);
int FindPath(int32_t source, int32_t dest, int max_path_len,
int32_t path[]) const;
bool CheckInvariants() const;
absl::Span<const int32_t> Successors(int32_t node) const;
absl::Span<const int32_t> Predecessors(int32_t node) const;
std::vector<int32_t> SuccessorsCopy(int32_t node) const;
std::vector<int32_t> PredecessorsCopy(int32_t node) const;
std::vector<int32_t> AllNodesInPostOrder() const;
std::string DebugString() const;
struct Rep;
private:
Rep *rep_;
GraphCycles(const GraphCycles &) = delete;
GraphCycles &operator=(const GraphCycles &) = delete;
};
}
#endif
#include "xla/service/graphcycles/graphcycles.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/service/graphcycles/ordered_set.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace {
using NodeSet = absl::flat_hash_set<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
struct Node {
int32_t rank;
bool visited;
};
struct NodeIO {
OrderedNodeSet in;
OrderedNodeSet out;
};
}
struct GraphCycles::Rep {
std::vector<Node> nodes_;
std::vector<NodeIO> node_io_;
std::vector<int32_t> free_nodes_;
std::vector<int32_t> deltaf_;
std::vector<int32_t> deltab_;
std::vector<int32_t> list_;
std::vector<int32_t> merged_;
  std::vector<int32_t> stack_;
std::vector<void*> node_data_;
};
GraphCycles::GraphCycles() : rep_(new Rep) {}
GraphCycles::~GraphCycles() {
delete rep_;
}
bool GraphCycles::CheckInvariants() const {
Rep* r = rep_;
NodeSet ranks;
for (size_t x = 0; x < r->nodes_.size(); x++) {
Node* nx = &r->nodes_[x];
if (nx->visited) {
LOG(FATAL) << "Did not clear visited marker on node " << x;
}
if (!ranks.insert(nx->rank).second) {
LOG(FATAL) << "Duplicate occurrence of rank " << nx->rank;
}
NodeIO* nx_io = &r->node_io_[x];
for (int32_t y : nx_io->out.GetSequence()) {
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
LOG(FATAL) << "Edge " << x << "->" << y << " has bad rank assignment "
<< nx->rank << "->" << ny->rank;
}
}
}
return true;
}
int32_t GraphCycles::NewNode() {
if (rep_->free_nodes_.empty()) {
Node n;
n.visited = false;
n.rank = rep_->nodes_.size();
rep_->nodes_.emplace_back(n);
rep_->node_io_.emplace_back();
rep_->node_data_.push_back(nullptr);
return n.rank;
} else {
int32_t r = rep_->free_nodes_.back();
rep_->free_nodes_.pop_back();
rep_->node_data_[r] = nullptr;
return r;
}
}
void GraphCycles::RemoveNode(int32_t node) {
NodeIO* x = &rep_->node_io_[node];
for (int32_t y : x->out.GetSequence()) {
rep_->node_io_[y].in.Erase(node);
}
for (int32_t y : x->in.GetSequence()) {
rep_->node_io_[y].out.Erase(node);
}
x->in.Clear();
x->out.Clear();
rep_->free_nodes_.push_back(node);
}
void* GraphCycles::GetNodeData(int32_t node) const {
return rep_->node_data_[node];
}
void GraphCycles::SetNodeData(int32_t node, void* data) {
rep_->node_data_[node] = data;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->node_io_[x].out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->node_io_[x].out.Erase(y);
rep_->node_io_[y].in.Erase(x);
}
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(absl::Span<const Node>, std::vector<int32_t>* delta);
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst);
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices);
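// Attempts to insert the edge x->y. Returns false and leaves the graph
// unchanged if x == y or if the edge would create a cycle; otherwise restores
// the topological order via a forward/backward search and a re-ranking pass.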
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
NodeIO* nx_io = &r->node_io_[x];
if (!nx_io->out.Insert(y)) {
return true;
}
NodeIO* ny_io = &r->node_io_[y];
ny_io->in.Insert(x);
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank <= ny->rank) {
return true;
}
if (!ForwardDFS(r, y, nx->rank)) {
nx_io->out.Erase(y);
ny_io->in.Erase(x);
ClearVisitedBits(r, r->deltaf_);
return false;
}
BackwardDFS(r, x, ny->rank);
Reorder(r);
return true;
}
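// Forward DFS from n over nodes with rank < upper_bound. Returns false iff a
// node with rank == upper_bound is reached, i.e. the candidate edge would
// close a cycle. Visited nodes are collected in r->deltaf_.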
static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
r->deltaf_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->out.GetSequence()) {
Node* nw = &r->nodes_[w];
if (nw->rank == upper_bound) {
return false;
}
if (!nw->visited && nw->rank < upper_bound) {
r->stack_.push_back(w);
}
}
}
return true;
}
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
r->deltab_.clear();
r->stack_.clear();
r->stack_.push_back(n);
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
Node* nn = &r->nodes_[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab_.push_back(n);
NodeIO* nn_io = &r->node_io_[n];
for (auto w : nn_io->in.GetSequence()) {
Node* nw = &r->nodes_[w];
if (!nw->visited && lower_bound < nw->rank) {
r->stack_.push_back(w);
}
}
}
}
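// Reassigns the ranks vacated by the two searches so that every node found by
// the backward search precedes every node found by the forward search,
// restoring a consistent topological order.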
static void Reorder(GraphCycles::Rep* r) {
Sort(r->nodes_, &r->deltab_);
Sort(r->nodes_, &r->deltaf_);
r->list_.clear();
MoveToList(r, &r->deltab_, &r->list_);
MoveToList(r, &r->deltaf_, &r->list_);
r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
std::merge(r->deltab_.begin(), r->deltab_.end(), r->deltaf_.begin(),
r->deltaf_.end(), r->merged_.begin());
for (size_t i = 0; i < r->list_.size(); i++) {
r->nodes_[r->list_[i]].rank = r->merged_[i];
}
}
static void Sort(absl::Span<const Node> nodes, std::vector<int32_t>* delta) {
std::sort(delta->begin(), delta->end(), [&](int32_t a, int32_t b) {
return nodes[a].rank < nodes[b].rank;
});
}
static void MoveToList(GraphCycles::Rep* r, std::vector<int32_t>* src,
std::vector<int32_t>* dst) {
for (size_t i = 0; i < src->size(); i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes_[w].rank;
r->nodes_[w].visited = false;
dst->push_back(w);
}
}
static void ClearVisitedBits(GraphCycles::Rep* r,
absl::Span<const int32_t> visited_indices) {
for (auto index : visited_indices) {
r->nodes_[index].visited = false;
}
}
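// Iterative DFS from x to y recording the path; a -1 sentinel pushed on the
// stack marks where to backtrack path_len. Returns the path length (0 if y is
// unreachable), writing at most max_path_len node ids into path.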
int GraphCycles::FindPath(int32_t x, int32_t y, int max_path_len,
int32_t path[]) const {
int path_len = 0;
Rep* r = rep_;
NodeSet seen;
r->stack_.clear();
r->stack_.push_back(x);
while (!r->stack_.empty()) {
int32_t n = r->stack_.back();
r->stack_.pop_back();
if (n < 0) {
path_len--;
continue;
}
if (path_len < max_path_len) {
path[path_len] = n;
}
path_len++;
r->stack_.push_back(-1);
if (n == y) {
return path_len;
}
for (auto w : r->node_io_[n].out.GetSequence()) {
if (seen.insert(w).second) {
r->stack_.push_back(w);
}
}
}
return 0;
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) const {
return FindPath(x, y, 0, nullptr) > 0;
}
bool GraphCycles::IsReachableNonConst(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = &r->nodes_[x];
Node* ny = &r->nodes_[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = !ForwardDFS(r, x, ny->rank);
ClearVisitedBits(r, r->deltaf_);
return reachable;
}
bool GraphCycles::CanContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b)) << "No edge exists from " << a << " to " << b;
RemoveEdge(a, b);
bool reachable = IsReachableNonConst(a, b);
InsertEdge(a, b);
return !reachable;
}
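// Merges the endpoints of edge a->b into a single node when doing so cannot
// create a cycle, rewiring b's edges onto the survivor; returns the surviving
// node id, or std::nullopt if the contraction was rejected.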
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
CHECK(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachableNonConst(a, b)) {
InsertEdge(a, b);
return std::nullopt;
}
if (rep_->node_io_[b].in.Size() + rep_->node_io_[b].out.Size() >
rep_->node_io_[a].in.Size() + rep_->node_io_[a].out.Size()) {
std::swap(a, b);
}
NodeIO* nb_io = &rep_->node_io_[b];
OrderedNodeSet out = std::move(nb_io->out);
OrderedNodeSet in = std::move(nb_io->in);
for (int32_t y : out.GetSequence()) {
rep_->node_io_[y].in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->node_io_[y].out.Erase(b);
}
rep_->free_nodes_.push_back(b);
rep_->node_io_[a].out.Reserve(rep_->node_io_[a].out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->node_io_[a].in.Reserve(rep_->node_io_[a].in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
absl::Span<const int32_t> GraphCycles::Successors(int32_t node) const {
return rep_->node_io_[node].out.GetSequence();
}
absl::Span<const int32_t> GraphCycles::Predecessors(int32_t node) const {
return rep_->node_io_[node].in.GetSequence();
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
absl::Span<const int32_t> successors = Successors(node);
return std::vector<int32_t>(successors.begin(), successors.end());
}
std::vector<int32_t> GraphCycles::PredecessorsCopy(int32_t node) const {
absl::Span<const int32_t> predecessors = Predecessors(node);
return std::vector<int32_t>(predecessors.begin(), predecessors.end());
}
namespace {
void SortInPostOrder(absl::Span<const Node> nodes,
std::vector<int32_t>* to_sort) {
absl::c_sort(*to_sort, [&](int32_t a, int32_t b) {
DCHECK(a == b || nodes[a].rank != nodes[b].rank);
return nodes[a].rank > nodes[b].rank;
});
}
}
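// Returns all live nodes sorted by decreasing rank, i.e. in reverse
// topological (post) order.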
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
absl::flat_hash_set<int32_t> free_nodes_set;
absl::c_copy(rep_->free_nodes_,
std::inserter(free_nodes_set, free_nodes_set.begin()));
std::vector<int32_t> all_nodes;
all_nodes.reserve(rep_->nodes_.size() - free_nodes_set.size());
for (int64_t i = 0, e = rep_->nodes_.size(); i < e; i++) {
if (!free_nodes_set.contains(i)) {
all_nodes.push_back(i);
}
}
SortInPostOrder(rep_->nodes_, &all_nodes);
return all_nodes;
}
std::string GraphCycles::DebugString() const {
absl::flat_hash_set<int32_t> free_nodes_set(rep_->free_nodes_.begin(),
rep_->free_nodes_.end());
std::string result = "digraph {\n";
for (int i = 0, end = rep_->nodes_.size(); i < end; i++) {
if (free_nodes_set.contains(i)) {
continue;
}
for (int32_t succ : rep_->node_io_[i].out.GetSequence()) {
absl::StrAppend(&result, " \"", i, "\" -> \"", succ, "\"\n");
}
}
absl::StrAppend(&result, "}\n");
return result;
}
} | #include "xla/service/graphcycles/graphcycles.h"
#include <cstdint>
#include <optional>
#include <random>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
typedef std::vector<int> Nodes;
struct Edge {
int from;
int to;
};
typedef std::vector<Edge> Edges;
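// Reference reachability check over the flat edge list, used to cross-check
// GraphCycles in the randomized test below.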
static bool IsReachable(Edges *edges, int from, int to,
absl::flat_hash_set<int> *seen) {
seen->insert(from);
if (from == to) return true;
for (int i = 0; i != edges->size(); i++) {
Edge *edge = &(*edges)[i];
if (edge->from == from) {
if (edge->to == to) {
return true;
} else if (seen->find(edge->to) == seen->end() &&
IsReachable(edges, edge->to, to, seen)) {
return true;
}
}
}
return false;
}
static void PrintNodes(Nodes *nodes) {
LOG(INFO) << "NODES (" << nodes->size() << ")";
for (int i = 0; i != nodes->size(); i++) {
LOG(INFO) << (*nodes)[i];
}
}
static void PrintEdges(Edges *edges) {
LOG(INFO) << "EDGES (" << edges->size() << ")";
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
LOG(INFO) << a << " " << b;
}
LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, tensorflow::GraphCycles *gc) {
LOG(INFO) << "GC EDGES";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
LOG(INFO) << "Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
absl::flat_hash_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes,
tensorflow::GraphCycles *gc) {
LOG(INFO) << "GC Transitive closure";
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->IsReachable(a, b)) {
LOG(INFO) << a << " " << b;
}
}
}
LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
absl::flat_hash_set<int> seen;
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
seen.clear();
int a = (*nodes)[i];
int b = (*nodes)[j];
bool gc_reachable = gc->IsReachable(a, b);
CHECK_EQ(gc_reachable, gc->IsReachableNonConst(a, b));
bool reachable = IsReachable(edges, a, b, &seen);
if (gc_reachable != reachable) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
PrintTransitiveClosure(nodes, edges, gc);
PrintGCTransitiveClosure(nodes, gc);
LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
<< reachable << " a " << a << " b " << b;
}
}
}
}
static void CheckEdges(Nodes *nodes, Edges *edges,
tensorflow::GraphCycles *gc) {
int count = 0;
for (int i = 0; i != edges->size(); i++) {
int a = (*edges)[i].from;
int b = (*edges)[i].to;
if (!gc->HasEdge(a, b)) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (int i = 0; i != nodes->size(); i++) {
for (int j = 0; j != nodes->size(); j++) {
int a = (*nodes)[i];
int b = (*nodes)[j];
if (gc->HasEdge(a, b)) {
count++;
}
}
}
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, gc);
LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static int RandomNode(std::mt19937 *rnd, Nodes *nodes) {
std::uniform_int_distribution<int> distribution(0, nodes->size() - 1);
return distribution(*rnd);
}
static int RandomEdge(std::mt19937 *rnd, Edges *edges) {
std::uniform_int_distribution<int> distribution(0, edges->size() - 1);
return distribution(*rnd);
}
static int EdgeIndex(Edges *edges, int from, int to) {
int i = 0;
while (i != edges->size() &&
((*edges)[i].from != from || (*edges)[i].to != to)) {
i++;
}
return i == edges->size() ? -1 : i;
}
TEST(GraphCycles, RandomizedTest) {
Nodes nodes;
Edges edges;
tensorflow::GraphCycles graph_cycles;
static const int kMaxNodes = 7;
static const int kDataOffset = 17;
int n = 100000;
int op = 0;
std::mt19937 rnd(tsl::testing::RandomSeed() + 1);
for (int iter = 0; iter != n; iter++) {
if ((iter % 10000) == 0) VLOG(0) << "Iter " << iter << " of " << n;
if (VLOG_IS_ON(3)) {
LOG(INFO) << "===============";
LOG(INFO) << "last op " << op;
PrintNodes(&nodes);
PrintEdges(&edges);
PrintGCEdges(&nodes, &graph_cycles);
}
for (int i = 0; i != nodes.size(); i++) {
ASSERT_EQ(reinterpret_cast<intptr_t>(graph_cycles.GetNodeData(i)),
i + kDataOffset)
<< " node " << i;
}
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
std::uniform_int_distribution<int> distribution(0, 5);
op = distribution(rnd);
switch (op) {
case 0:
if (nodes.size() < kMaxNodes) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
ASSERT_GE(new_node, 0);
for (int i = 0; i != nodes.size(); i++) {
ASSERT_NE(nodes[i], new_node);
}
nodes.push_back(new_node);
}
break;
case 1:
if (!nodes.empty()) {
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int i = 0;
while (i != edges.size()) {
if (edges[i].from == node || edges[i].to == node) {
edges[i] = edges.back();
edges.pop_back();
} else {
i++;
}
}
}
break;
case 2:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
if (graph_cycles.InsertEdge(nodes[from], nodes[to])) {
Edge new_edge;
new_edge.from = nodes[from];
new_edge.to = nodes[to];
edges.push_back(new_edge);
} else {
absl::flat_hash_set<int> seen;
ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
<< "Edge " << nodes[to] << "->" << nodes[from];
}
}
}
break;
case 3:
if (!edges.empty()) {
int i = RandomEdge(&rnd, &edges);
int from = edges[i].from;
int to = edges[i].to;
ASSERT_EQ(i, EdgeIndex(&edges, from, to));
edges[i] = edges.back();
edges.pop_back();
ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
VLOG(1) << "removing edge " << from << " " << to;
graph_cycles.RemoveEdge(from, to);
}
break;
case 4:
if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
int32_t path[2 * kMaxNodes];
int path_len = graph_cycles.FindPath(nodes[from], nodes[to],
2 * kMaxNodes, path);
absl::flat_hash_set<int> seen;
bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
bool gc_reachable = graph_cycles.IsReachable(nodes[from], nodes[to]);
ASSERT_EQ(gc_reachable,
graph_cycles.IsReachableNonConst(nodes[from], nodes[to]));
ASSERT_EQ(path_len != 0, reachable);
ASSERT_EQ(path_len != 0, gc_reachable);
ASSERT_LE(path_len, kMaxNodes + 1);
if (path_len != 0) {
ASSERT_EQ(nodes[from], path[0]);
ASSERT_EQ(nodes[to], path[path_len - 1]);
for (int i = 1; i < path_len; i++) {
ASSERT_NE(-1, EdgeIndex(&edges, path[i - 1], path[i]));
ASSERT_TRUE(graph_cycles.HasEdge(path[i - 1], path[i]));
}
}
}
break;
case 5:
CHECK(graph_cycles.CheckInvariants());
break;
default:
        LOG(FATAL) << "Unexpected op " << op;
}
std::bernoulli_distribution rarely(1.0 / 1024.0);
if (rarely(rnd)) {
VLOG(3) << "Graph expansion";
CheckEdges(&nodes, &edges, &graph_cycles);
CheckTransitiveClosure(&nodes, &edges, &graph_cycles);
for (int i = 0; i != 256; i++) {
int new_node = graph_cycles.NewNode();
ASSERT_NE(-1, new_node);
VLOG(1) << "adding node " << new_node;
ASSERT_GE(new_node, 0);
ASSERT_EQ(nullptr, graph_cycles.GetNodeData(new_node));
graph_cycles.SetNodeData(
new_node, reinterpret_cast<void *>(
static_cast<intptr_t>(new_node + kDataOffset)));
for (int j = 0; j != nodes.size(); j++) {
ASSERT_NE(nodes[j], new_node);
}
nodes.push_back(new_node);
}
for (int i = 0; i != 256; i++) {
ASSERT_GT(nodes.size(), 0);
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
nodes.pop_back();
VLOG(1) << "removing node " << node;
graph_cycles.RemoveNode(node);
int j = 0;
while (j != edges.size()) {
if (edges[j].from == node || edges[j].to == node) {
edges[j] = edges.back();
edges.pop_back();
} else {
j++;
}
}
}
CHECK(graph_cycles.CheckInvariants());
}
}
}
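// Test fixture with a graph of 100 pre-created nodes (ids 0..99).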
class GraphCyclesTest : public ::testing::Test {
public:
tensorflow::GraphCycles g_;
GraphCyclesTest() {
for (int i = 0; i < 100; i++) {
CHECK_EQ(i, g_.NewNode());
}
CHECK(g_.CheckInvariants());
}
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
CHECK(g_.CheckInvariants());
}
std::string Path(int x, int y) {
static const int kPathSize = 5;
int32_t path[kPathSize];
int np = g_.FindPath(x, y, kPathSize, path);
std::string result;
for (int i = 0; i < np; i++) {
if (i >= kPathSize) {
result += " ...";
break;
}
if (!result.empty()) result.push_back(' ');
char buf[20];
snprintf(buf, sizeof(buf), "%d", path[i]);
result += buf;
}
return result;
}
};
TEST_F(GraphCyclesTest, NoCycle) {
AddMultiples();
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
EXPECT_EQ("4 8", Path(4, 8));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
CHECK(g_.CheckInvariants());
EXPECT_FALSE(AddEdge(9, 2));
EXPECT_EQ("2 4 8 16 9", Path(2, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, LongPath) {
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(4, 6));
ASSERT_TRUE(AddEdge(6, 8));
ASSERT_TRUE(AddEdge(8, 10));
ASSERT_TRUE(AddEdge(10, 12));
ASSERT_FALSE(AddEdge(12, 2));
EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, RemoveNode) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(3, 4));
ASSERT_TRUE(AddEdge(4, 5));
g_.RemoveNode(3);
ASSERT_TRUE(AddEdge(5, 1));
}
TEST_F(GraphCyclesTest, ManyEdges) {
const int N = 50;
for (int i = 0; i < N; i++) {
for (int j = 1; j < N; j++) {
ASSERT_TRUE(AddEdge(i, i + j));
}
}
CHECK(g_.CheckInvariants());
ASSERT_TRUE(AddEdge(2 * N - 1, 0));
CHECK(g_.CheckInvariants());
ASSERT_FALSE(AddEdge(10, 9));
CHECK(g_.CheckInvariants());
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(g_.ContractEdge(1, 2).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(g_.ContractEdge(2, 3).value(), 2);
CHECK(g_.CheckInvariants());
EXPECT_TRUE(g_.HasEdge(2, 4));
}
TEST_F(GraphCyclesTest, CanContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.CanContractEdge(1, 3));
EXPECT_FALSE(g_.CanContractEdge(2, 4));
EXPECT_TRUE(g_.CanContractEdge(1, 2));
EXPECT_TRUE(g_.CanContractEdge(2, 3));
EXPECT_TRUE(g_.CanContractEdge(3, 4));
}
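// Benchmark: repeatedly builds a fresh graph of num_nodes nodes with up to
// four forward edges per node.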
static void BM_StressTest(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
tensorflow::GraphCycles g;
int32_t *nodes = new int32_t[num_nodes];
for (int i = 0; i < num_nodes; i++) {
nodes[i] = g.NewNode();
}
for (int i = 0; i < num_nodes; i++) {
int end = std::min(num_nodes, i + 5);
for (int j = i + 1; j < end; j++) {
if (nodes[i] >= 0 && nodes[j] >= 0) {
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
}
delete[] nodes;
}
}
BENCHMARK(BM_StressTest)->Range(2048, 1048576);
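// Benchmark: builds a star of edges into the last node, then measures
// contracting all of those edges one by one.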
static void BM_ContractEdge(::testing::benchmark::State &state) {
const int num_nodes = state.range(0);
while (state.KeepRunningBatch(num_nodes)) {
state.PauseTiming();
tensorflow::GraphCycles g;
std::vector<int32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
for (int i = 0; i < num_nodes - 1; ++i) {
g.InsertEdge(nodes[i], nodes[num_nodes - 1]);
}
state.ResumeTiming();
int node = num_nodes - 1;
for (int i = 0; i < num_nodes - 1; ++i) {
node = g.ContractEdge(nodes[i], node).value();
}
}
}
BENCHMARK(BM_ContractEdge)->Arg(1000)->Arg(10000);
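// Benchmark: builds a random forward-edge DAG with branch factor 2 and
// measures IsReachableNonConst on node pairs re-sampled every 256 queries.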
static void BM_IsReachableNonConst(testing::benchmark::State &state) {
const int num_nodes = state.range(0);
tensorflow::GraphCycles g;
std::vector<uint32_t> nodes;
nodes.reserve(num_nodes);
for (int i = 0; i < num_nodes; i++) {
nodes.push_back(g.NewNode());
}
absl::BitGen bitgen;
for (int i = 0; i < num_nodes; i++) {
int max = num_nodes - 1 - i;
if (max == 0) break;
constexpr int branch_factor = 2;
for (int b = 0; b < branch_factor; b++) {
int j = i + 1 + absl::Uniform(bitgen, 0, max);
CHECK_LT(j, num_nodes);
CHECK(g.InsertEdge(nodes[i], nodes[j]));
}
}
auto get_random_node = [&]() {
return nodes[absl::Uniform(bitgen, 0, num_nodes)];
};
uint32_t src, dst;
int i = 0;
for (auto s : state) {
if (i % 256 == 0) {
src = get_random_node();
dst = get_random_node();
}
bool reachable = g.IsReachableNonConst(src, dst);
benchmark::DoNotOptimize(reachable);
i++;
}
}
BENCHMARK(BM_IsReachableNonConst)
->Arg(10)
->Arg(50)
->Arg(100)
->Arg(200)
->Arg(1000)
->Arg(30000); | 2,033 |
#ifndef XLA_SERVICE_GPU_REDUCTION_DIMENSION_GROUPER_H_
#define XLA_SERVICE_GPU_REDUCTION_DIMENSION_GROUPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
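// Groups adjacent reduced dimensions of a reduction input into a single
// dimension via a bitcast, e.g. a reduce of f32[100,10,32,3] over {2,3}
// becomes a reduce of a bitcast f32[100,10,96] over {2}.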
class ReductionDimensionGrouper : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-dimension-grouper";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_dimension_grouper.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class ReduceDimensionGroupVisitor : public DfsHloRewriteVisitor {
public:
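  // Merges each run of consecutive reduced dimensions in every reduce input by
  // bitcasting the input to a grouped shape, then rebuilds the reduction over
  // the grouped dimensions.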
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(4) << "Input: " << reduce->ToString();
absl::InlinedVector<HloInstruction *, 2> reduce_inputs_grouped;
std::vector<int64_t> reduced_dims_grouped;
int idx = -1;
for (HloInstruction *operand : reduce->inputs()) {
idx++;
std::vector<int64_t> new_grouped_dims;
const Shape &shape = operand->shape();
CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
<< "Default layout should be enforced on reduction operand";
auto is_reduced = [&](int dim) {
return absl::c_linear_search(reduce->dimensions(), dim);
};
bool changed = false;
int64_t next_dim_size = 1;
for (int logical_dim = 0; logical_dim < shape.rank(); logical_dim++) {
VLOG(5) << "Processing dimension " << logical_dim << " of size "
<< shape.dimensions(logical_dim);
if (is_reduced(logical_dim) && logical_dim < shape.rank() - 1 &&
is_reduced(logical_dim + 1)) {
VLOG(5) << "This and consecutive dimension are reduced, merging";
changed = true;
next_dim_size *= shape.dimensions(logical_dim);
continue;
}
if (is_reduced(logical_dim)) {
new_grouped_dims.push_back(next_dim_size *
shape.dimensions(logical_dim));
if (idx == 0) {
reduced_dims_grouped.push_back(new_grouped_dims.size() - 1);
}
next_dim_size = 1;
} else {
new_grouped_dims.push_back(shape.dimensions(logical_dim));
}
}
if (!changed) {
return absl::OkStatus();
}
Shape grouped_shape =
ShapeUtil::MakeShape(shape.element_type(), new_grouped_dims);
reduce_inputs_grouped.push_back(reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(grouped_shape, operand),
&operand->metadata()));
VLOG(5) << "Adding bitcast: " << reduce_inputs_grouped.back()->ToString();
}
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
reduce->shape(), reduce_inputs_grouped, reduce->init_values(),
reduced_dims_grouped, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionDimensionGrouper::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed, ReduceDimensionGroupVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_dimension_grouper.h"
#include "xla/service/gpu/reduction_dimension_grouper.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionDimensionGrouperTest : public HloTestBase {
public:
void CheckDimensionGrouper(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionDimensionGrouper{}, expected);
}
};
TEST_F(ReductionDimensionGrouperTest, ReductionWithGrouping) {
const char* hlo = R"(
HloModule ReductionWithGrouping
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100,10]{0,1} reduce(input, zero), dimensions={2,3}, to_apply=add
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(ReductionDimensionGrouperTest, ReductionWithGroupingVariadic) {
const char* hlo = R"(
HloModule ReductionWithGrouping
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
idxs = u32[100,10,32,3]{3,2,1,0} parameter(1)
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[100,10]{1,0}, u32[100,10]{1,0}) reduce(input, idxs, zero, zero_idx), dimensions={2,3}, to_apply=argmax
}
)";
CheckDimensionGrouper(hlo, R"(
)");
}
}
} | 2,034 |
#ifndef XLA_SERVICE_GPU_GPU_LAYOUT_ASSIGNMENT_H_
#define XLA_SERVICE_GPU_GPU_LAYOUT_ASSIGNMENT_H_
#include <cstdint>
#include <initializer_list>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/computation_layout.h"
#include "xla/service/layout_assignment.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
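// GPU-specific layout assignment pass; adds backend layout constraints for
// cuDNN convolutions, dots, and other GPU-lowered operations.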
class GpuLayoutAssignment : public LayoutAssignment {
public:
explicit GpuLayoutAssignment(
ComputationLayout* entry_computation_layout,
const se::GpuComputeCapability& gpu_version,
const se::dnn::VersionInfo& dnn_version,
ChannelLayoutConstraints* channel_constraints = nullptr)
: LayoutAssignment(entry_computation_layout, channel_constraints),
gpu_version_(gpu_version),
dnn_version_(dnn_version) {}
~GpuLayoutAssignment() override = default;
protected:
absl::Status AddBackendConstraints(LayoutConstraints* constraints) override;
private:
absl::Status AddBackendConstraintsToDnnConvCustomCall(
HloCustomCallInstruction* instr, LayoutConstraints* constraints);
absl::Status SetOperandMajorToMinorLayout(
const HloInstruction* instruction, int64_t operand,
std::initializer_list<absl::Span<const int64_t>> dim_groups);
absl::Status SetDotOperandLayout(const HloInstruction* instruction,
int64_t operand,
absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims);
absl::Status SetDotLayout(const HloInstruction* instruction,
LayoutConstraints* constraints);
bool PropagateReductionLayoutToOperand(const HloInstruction* user) override;
bool InstructionCanChangeLayoutInstance(
const HloInstruction* instruction) override;
const se::GpuComputeCapability gpu_version_;
const se::dnn::VersionInfo dnn_version_;
};
}
}
#endif
#include "xla/service/gpu/gpu_layout_assignment.h"
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using se::dnn::DataLayout;
using se::dnn::FilterLayout;
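// Chooses (input, filter, output) cuDNN layouts for a convolution custom call
// based on element type, compute capability, cuDNN version, and debug-option
// overrides.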
static std::tuple<DataLayout, FilterLayout, DataLayout>
HeuristicLayoutAssignment(const HloInstruction* instr,
const se::GpuComputeCapability& gpu_version,
const se::dnn::VersionInfo& dnn_version) {
constexpr auto kAllNCHW =
std::make_tuple(DataLayout::kBatchDepthYX, FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX);
constexpr auto kAllNCHW_VECT_C =
std::make_tuple(DataLayout::kBatchDepthYX4, FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4);
constexpr auto kAllNHWC =
std::make_tuple(DataLayout::kBatchYXDepth, FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth);
const ConvolutionDimensionNumbers& dnums =
instr->convolution_dimension_numbers();
Shape input_shape = instr->operand(0)->shape();
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
if (primitive_util::IsIntegralType(input_ty)) {
if (input_ty == S8 && dnums.input_spatial_dimensions_size() == 2 &&
input_shape.dimensions_size() == 5) {
VLOG(2) << "Using NCHW_VECT_C for int8_t conv " << instr->ToString();
return kAllNCHW_VECT_C;
}
VLOG(2) << "Using NHWC for int8_t conv " << instr->ToString();
return kAllNHWC;
}
if (primitive_util::IsF8Type(input_ty)) {
VLOG(2) << "Using NHWC for FP8 conv " << instr->ToString();
return kAllNHWC;
}
const DebugOptions& debug_options =
instr->GetModule()->config().debug_options();
if (debug_options.xla_gpu_force_conv_nchw()) {
VLOG(2) << "Overriding layout to NCHW for " << instr->ToString();
return kAllNCHW;
}
if (debug_options.xla_gpu_force_conv_nhwc()) {
VLOG(2) << "Overriding layout to NHWC for " << instr->ToString();
return kAllNHWC;
}
const auto* rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
if (rocm_compute_capability && input_ty == F16) return kAllNHWC;
const bool isFloat16 = (input_ty == F16) || (input_ty == BF16);
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version)) {
const auto* cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
bool is_volta =
cuda_compute_capability &&
cuda_compute_capability->IsAtLeast(se::CudaComputeCapability::VOLTA);
if (!isFloat16 || !is_volta ||
instr->shape().tuple_shapes(0).dimensions_size() != 4) {
return kAllNCHW;
}
if (std::make_tuple(dnn_version.major_version(),
dnn_version.minor_version()) <= std::make_tuple(7, 3) &&
instr->custom_call_target() == kCudnnConvBackwardInputCallTarget &&
window_util::HasStride(instr->window())) {
return kAllNCHW;
}
} else if (std::holds_alternative<se::RocmComputeCapability>(gpu_version)) {
bool is_enabled = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_USE_ROCM_NHWC",
                                        /*default_val=*/false, &is_enabled));
auto rocm_compute_capability =
std::get<se::RocmComputeCapability>(gpu_version);
if (!isFloat16 || (!rocm_compute_capability.has_nhwc_layout_support()) ||
instr->shape().tuple_shapes(0).dimensions_size() != 4 || !is_enabled) {
return kAllNCHW;
}
}
VLOG(2) << "Using heuristic to figure out layouts for " << instr->ToString();
return kAllNHWC;
}
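// Applies the heuristically chosen conv layouts as constraints on the custom
// call's operands and result buffer, mapping lhs/rhs/result to
// input/filter/output according to the convolution kind.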
absl::Status GpuLayoutAssignment::AddBackendConstraintsToDnnConvCustomCall(
HloCustomCallInstruction* instr, LayoutConstraints* constraints) {
Shape lhs_shape = instr->operand(0)->shape();
Shape rhs_shape = instr->operand(1)->shape();
Shape result_shape = instr->shape().tuple_shapes(0);
Shape* input_shape;
Shape* filter_shape;
Shape* output_shape;
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instr));
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
input_shape = &lhs_shape;
filter_shape = &rhs_shape;
output_shape = &result_shape;
break;
case CudnnConvKind::kBackwardInput:
input_shape = &result_shape;
filter_shape = &rhs_shape;
output_shape = &lhs_shape;
break;
case CudnnConvKind::kBackwardFilter:
input_shape = &lhs_shape;
filter_shape = &result_shape;
output_shape = &rhs_shape;
break;
}
{
DataLayout input;
FilterLayout filter;
DataLayout output;
std::tie(input, filter, output) =
HeuristicLayoutAssignment(instr, gpu_version_, dnn_version_);
TF_ASSIGN_OR_RETURN(
std::tie(*input_shape->mutable_layout(),
*filter_shape->mutable_layout(),
*output_shape->mutable_layout()),
StreamExecutorConvLayoutsToXlaLayouts(
instr->convolution_dimension_numbers(), input, filter, output));
}
TF_ASSIGN_OR_RETURN(
const LogicalBuffer* call_result_buf,
points_to_analysis_->GetBufferDefinedAt(instr, {0}));
TF_RETURN_IF_ERROR(SetOperandLayout(lhs_shape, instr, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(rhs_shape, instr, 1));
TF_RETURN_IF_ERROR(SetBufferLayout(result_shape.layout(), *call_result_buf));
if (kind == CudnnConvKind::kForwardActivation &&
instr->operand_count() == 4) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, 3));
}
if (kind == CudnnConvKind::kForwardGraph) {
for (int k = 2; k < instr->operand_count(); ++k) {
if (!ShapeUtil::IsScalar(instr->operand(k)->shape())) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, k));
}
}
}
if (instr->operand_count() > 2 && kind != CudnnConvKind::kForwardActivation &&
kind != CudnnConvKind::kForwardGraph) {
return Internal(
"Invalid convolution. Conv has a side input, but kind is not fused "
"conv forward or graph conv foward: %s",
instr->ToString());
}
return absl::OkStatus();
}
namespace {
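// Produces a "Fortran" (column-major) layout by taking the default layout and
// swapping its two minor-most dimensions.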
void SetFortranLayout(Shape* shape) {
LayoutUtil::SetToDefaultLayout(shape);
int n = shape->mutable_layout()->minor_to_major_size();
CHECK_GE(n, 2);
std::swap(shape->mutable_layout()->mutable_minor_to_major()->at(0),
shape->mutable_layout()->mutable_minor_to_major()->at(1));
}
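// Returns true if `shape` (with its layout) can be represented as a
// MatrixLayout for the given dot's dimension numbers.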
bool DotCanSupportShapeWithLayout(const HloInstruction* dot,
const Shape& shape) {
const DotDimensionNumbers& dot_dims = dot->dot_dimension_numbers();
return MatrixLayout::For(shape, dot_dims.lhs_batch_dimensions().size(),
dot->operand(0)->shape().rank() -
dot_dims.lhs_contracting_dimensions().size() -
dot_dims.lhs_batch_dimensions().size(),
dot_dims.rhs_batch_dimensions().size(),
dot->operand(1)->shape().rank() -
dot_dims.rhs_contracting_dimensions().size() -
dot_dims.rhs_batch_dimensions().size())
.ok();
}
}
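// Adds GPU backend layout constraints, walking the computation in reverse
// post-order: cuDNN convolutions, dots, transposes of dots, FFTs, sorts,
// triangular solves, collectives, and send/recv.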
absl::Status GpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
auto post_order = constraints->computation()->MakeInstructionPostOrder();
for (auto iterator = post_order.rbegin(); iterator != post_order.rend();
++iterator) {
HloInstruction* instruction = *iterator;
if (IsCustomCallToDnnConvolution(*instruction)) {
TF_RETURN_IF_ERROR(AddBackendConstraintsToDnnConvCustomCall(
Cast<HloCustomCallInstruction>(instruction), constraints));
}
CHECK(!IsCublasGemm(*instruction))
<< "Gemm rewriting should run after layout assignment";
if (instruction->opcode() == HloOpcode::kDot) {
const Shape& output_shape = instruction->shape();
const Shape& lhs_shape = instruction->operand(0)->shape();
const Shape& rhs_shape = instruction->operand(1)->shape();
const DotDimensionNumbers& dot_dims =
instruction->dot_dimension_numbers();
absl::Span<const int64_t> lhs_batch_dims =
dot_dims.lhs_batch_dimensions();
absl::Span<const int64_t> lhs_contracting_dims =
dot_dims.lhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> lhs_non_contracting_dims,
GetNonContractingDims(lhs_shape, lhs_batch_dims,
lhs_contracting_dims));
absl::Span<const int64_t> rhs_batch_dims =
dot_dims.rhs_batch_dimensions();
absl::Span<const int64_t> rhs_contracting_dims =
dot_dims.rhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> rhs_non_contracting_dims,
GetNonContractingDims(rhs_shape, rhs_batch_dims,
rhs_contracting_dims));
const DebugOptions& debug_options =
instruction->GetModule()->config().debug_options();
bool is_bf16_to_bf16 =
(output_shape.element_type() == PrimitiveType::BF16 &&
lhs_shape.element_type() == PrimitiveType::BF16 &&
rhs_shape.element_type() == PrimitiveType::BF16);
bool is_s8_to_s32 = (output_shape.element_type() == PrimitiveType::S32 &&
lhs_shape.element_type() == PrimitiveType::S8 &&
rhs_shape.element_type() == PrimitiveType::S8 &&
output_shape.dimensions_size() == 2 &&
lhs_shape.dimensions_size() == 2 &&
rhs_shape.dimensions_size() == 2);
if (is_s8_to_s32 ||
(is_bf16_to_bf16 &&
debug_options.xla_gpu_ensure_minor_dot_contraction_dims())) {
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 0,
{lhs_batch_dims, lhs_non_contracting_dims, lhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 1,
{rhs_batch_dims, rhs_non_contracting_dims, rhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
} else {
if (!lhs_batch_dims.empty() || lhs_contracting_dims.size() > 1 ||
lhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 0, lhs_batch_dims,
lhs_contracting_dims,
lhs_non_contracting_dims));
}
if (!rhs_batch_dims.empty() || rhs_non_contracting_dims.size() > 1 ||
rhs_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 1, rhs_batch_dims,
rhs_contracting_dims,
rhs_non_contracting_dims));
}
if (!lhs_batch_dims.empty() || lhs_non_contracting_dims.size() > 1 ||
rhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
}
}
} else if (instruction->opcode() == HloOpcode::kTranspose) {
const HloInstruction* operand = instruction->operand(0);
if ((operand->opcode() != HloOpcode::kDot) ||
(operand->user_count() > 1)) {
continue;
}
Shape shape = operand->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(instruction->dimensions());
if (DotCanSupportShapeWithLayout(operand, shape)) {
TF_RETURN_IF_ERROR(
SetOperandLayout(shape, instruction, 0));
}
} else if (instruction->opcode() == HloOpcode::kFft) {
Shape op0_shape = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&op0_shape);
Shape output_shape = instruction->shape();
LayoutUtil::SetToDefaultLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kSort &&
instruction->operand(0)->shape().rank() > 1) {
Shape keys_shape = instruction->operand(0)->shape();
Layout keys_layout =
LayoutUtil::GetDefaultLayoutForRank(keys_shape.rank());
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
Shape shape = instruction->operand(i)->shape();
*shape.mutable_layout() = keys_layout;
TF_RETURN_IF_ERROR(SetOperandLayout(shape, instruction, i));
const LogicalBuffer* output_buffer;
if (instruction->shape().IsArray()) {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {}));
} else {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {i}));
}
TF_RETURN_IF_ERROR(SetBufferLayout(keys_layout, *output_buffer));
}
} else if (instruction->opcode() == HloOpcode::kTriangularSolve) {
Shape op0_shape = instruction->operand(0)->shape();
Shape op1_shape = instruction->operand(1)->shape();
Shape output_shape = instruction->shape();
SetFortranLayout(&op0_shape);
SetFortranLayout(&op1_shape);
SetFortranLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(op1_shape, instruction, 1));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else if (instruction->opcode() == HloOpcode::kAllToAll &&
instruction->shape().IsArray()) {
auto* all_to_all = Cast<HloAllToAllInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(all_to_all->shape(),
*all_to_all->split_dimension()),
all_to_all));
} else if (instruction->opcode() == HloOpcode::kSend) {
Shape s = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&s);
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction->operand(0)));
TF_RETURN_IF_ERROR(
SetArrayOperandLayout(s.layout(), instruction->operand(0), 0));
} else if (instruction->opcode() == HloOpcode::kRecv) {
Shape s = instruction->shape();
ShapeUtil::ForEachMutableSubshape(
&s, [&](Shape* subshape, const ShapeIndex& index) {
LayoutUtil::SetToDefaultLayout(subshape);
});
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction));
}
}
return absl::OkStatus();
}
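// Constrains a dot operand's layout: keeps an existing supported layout,
// otherwise tries the default layout, and finally forces a
// (batch, rows, cols) major-to-minor order.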
absl::Status GpuLayoutAssignment::SetDotOperandLayout(
const HloInstruction* instruction, int64_t operand,
absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims) {
Shape shape = instruction->operand(operand)->shape();
if (shape.has_layout() &&
MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
LayoutUtil::SetToDefaultLayout(&shape);
if (MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
return SetOperandMajorToMinorLayout(
instruction, operand,
{batch_dims, row_dims, col_dims});
}
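// Constrains the operand to a layout whose major-to-minor order is the
// concatenation of the given dimension groups.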
absl::Status GpuLayoutAssignment::SetOperandMajorToMinorLayout(
const HloInstruction* instruction, int64_t operand,
std::initializer_list<absl::Span<const int64_t>> dim_groups) {
size_t size = 0;
for (auto group : dim_groups) size += group.size();
std::vector<int64_t> major_to_minor;
major_to_minor.reserve(size);
for (const auto& group : dim_groups) {
major_to_minor.insert(major_to_minor.end(), group.begin(), group.end());
}
Shape shape = instruction->operand(operand)->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(major_to_minor);
return SetOperandLayout(shape, instruction, operand);
}
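// Sets the dot's own layout: prefers a layout already required by one of its
// users when the dot can support it, else falls back to the default layout.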
absl::Status GpuLayoutAssignment::SetDotLayout(
const HloInstruction* instruction, LayoutConstraints* constraints) {
for (const HloInstruction* user : instruction->users()) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) != instruction) {
continue;
}
const ShapeLayout* constraint = constraints->OperandLayout(user, i);
if ((constraint != nullptr) &&
DotCanSupportShapeWithLayout(instruction, constraint->shape())) {
return SetInstructionLayout(constraint->shape(), instruction);
}
}
}
return SetInstructionLayout(
LayoutUtil::GetWithDefaultLayout(instruction->shape()), instruction);
}
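// Returns true if forcing the reduction layout onto the operand is worthwhile,
// i.e. an unnested row reduction of this size beats the elemental emitter.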
bool GpuLayoutAssignment::PropagateReductionLayoutToOperand(
const HloInstruction* user) {
int64_t reduction_size = 1;
for (int64_t reduction_dim : user->dimensions()) {
reduction_size *= user->operand(0)->shape().dimensions(reduction_dim);
}
int64_t kept_dimension_size = ShapeUtil::ElementsIn(user->shape());
return IsUnnestedReductionFasterThanElemental(
{true, {1, kept_dimension_size, reduction_size}});
}
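// MoveToHost/MoveToDevice annotation custom calls must not change layout;
// everything else defers to the base class.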
bool GpuLayoutAssignment::InstructionCanChangeLayoutInstance(
const HloInstruction* instruction) {
const HloCustomCallInstruction* custom_call =
DynCast<HloCustomCallInstruction>(instruction);
if (custom_call != nullptr &&
(custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget ||
custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
return false;
}
return LayoutAssignment::InstructionCanChangeLayoutInstance(instruction);
}
}
} | #include "xla/service/gpu/gpu_layout_assignment.h"
#include "xla/service/gpu/gpu_layout_assignment.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::tsl::testing::IsOkAndHolds;
class LayoutAssignmentTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
se::GpuComputeCapability GetGpuComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
se::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor(),
se::dnn::VersionInfo{8, 3, 0});
}
};
TEST_F(LayoutAssignmentTest, Elementwise) {
Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
Shape ashape_in_row_major(ashape);
Shape ashape_in_col_major(ashape);
*ashape_in_row_major.mutable_layout() = LayoutUtil::MakeLayout({1, 0});
*ashape_in_col_major.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
for (const Shape& lhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& rhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& result_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
auto builder = HloComputation::Builder(TestName());
auto x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ashape, "x"));
auto y = builder.AddInstruction(
HloInstruction::CreateParameter(1, ashape, "y"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(ashape, HloOpcode::kAdd, x, y));
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(builder.Build(add));
ComputationLayout computation_layout(
computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(lhs_shape_with_layout);
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(rhs_shape_with_layout);
*computation_layout.mutable_result_layout() =
ShapeLayout(result_shape_with_layout);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
for (const HloInstruction* operand : add->operands()) {
EXPECT_TRUE(LayoutUtil::Equal(add->shape().layout(),
operand->shape().layout()));
}
}
}
}
}
TEST_F(LayoutAssignmentTest, DotLayoutUnchangedIfValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3]{1,2,0} parameter(0)
p1 = f32[5,3,4]{1,2,0} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {1, 2, 0}),
m::Op().WithShape(F32, {5, 3, 4}, {1, 2, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotLayoutSetToDefaultIfDefaultValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,3,2] parameter(0)
p1 = f32[5,4,3]{0,1,2} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={1},
rhs_batch_dims={0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 3, 2}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 4, 3}, {2, 1, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotOperandLayoutSetToBatchRowsColsOtherwise) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[2,3,5]{2,1,0} parameter(0)
p1 = f32[3,4,5] parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={2}, lhs_contracting_dims={1},
rhs_batch_dims={2}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {2, 3, 5}, {0, 1, 2}),
m::Op().WithShape(F32, {3, 4, 5}, {1, 0, 2}))));
}
TEST_F(LayoutAssignmentTest, DotOperandInconsistentDimLayouts) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,6,2,3] parameter(0)
p1 = f32[6,5,3,4] parameter(1)
ROOT dot.1330.10585 = f32[5,6,2,4] dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={1,0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 6, 2, 3}, {3, 2, 1, 0}),
m::Op().WithShape(F32, {6, 5, 3, 4}, {3, 2, 0, 1}))));
}
TEST_F(LayoutAssignmentTest, TransposedDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3] parameter(0)
p1 = f32[5,3,4,6] parameter(1)
dot = f32[5,2,4,6] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT out = f32[2,5,4,6] transpose(dot), dimensions={1,0,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 3, 4, 6}, {3, 2, 1, 0}))
.WithShape(F32, {5, 2, 4, 6}, {3, 2, 0, 1}))
.WithShape(F32, {2, 5, 4, 6}, {3, 2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, TransposedDotOfDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[8,50] parameter(0)
p1 = f32[2,8,4,4] parameter(1)
p2 = f32[4,38] parameter(2)
dot.1 = f32[50,2,4,4]{3,2,1,0} dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
dot.2 = f32[50,2,4,38]{3,2,1,0} dot(dot.1, p2),
lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT out = f32[2,50,38,4]{2,3,0,1} transpose(dot.2), dimensions={1,0,3,2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Dot(m::Copy(m::Dot(m::Op().WithShape(F32, {8, 50}, {1, 0}),
m::Op().WithShape(F32, {2, 8, 4, 4},
{3, 2, 0, 1}))
.WithShape(F32, {50, 2, 4, 4}, {3, 2, 1, 0}))
.WithShape(F32, {50, 2, 4, 4}, {3, 1, 0, 2}),
m::Op().WithShape(F32, {4, 38}, {1, 0}))
.WithShape(F32, {50, 2, 4, 38}, {3, 2, 1, 0}))
.WithShape(F32, {2, 50, 38, 4}, {2, 3, 0, 1})));
}
TEST_F(LayoutAssignmentTest, DotLayoutS8) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY int8_t {
p0 = s8[32,64] parameter(0)
p1 = s8[64,96] parameter(1)
ROOT out = s32[32,96] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(S8, {32, 64}, {1, 0}),
m::Op().WithShape(S8, {64, 96}, {0, 1}))));
}
TEST_F(LayoutAssignmentTest, SortLayout) {
const char* hlo_text = R"(
HloModule SortLayout
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort {
keys = f32[3,2]{0,1} constant({{0,1},{0,1},{0,1}})
values = f32[2,3]{1,0} parameter(0)
transpose = f32[3,2]{1,0} transpose(values), dimensions={1,0}
ROOT sort = (f32[3,2]{1,0}, f32[3,2]{1,0}) sort(keys, transpose),
dimensions={1}, to_apply=compare
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Sort(m::Op().WithShape(F32, {3, 2}, {1, 0}),
m::Op().WithShape(F32, {3, 2}, {1, 0}))));
}
TEST_F(LayoutAssignmentTest, FftLayout) {
const char* hlo_text = R"(
HloModule Fft_module
ENTRY Fft {
input = c64[8,32]{0,1} parameter(0)
fft = c64[8,32] fft(input), fft_type=FFT, fft_length={32}
ROOT transpose = c64[32,8] transpose(fft), dimensions={1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
m::Transpose(m::Fft(m::Op().WithShape(C64, {8, 32}, {1, 0}))
.WithShape(C64, {8, 32}, {1, 0})))));
}
TEST_F(LayoutAssignmentTest, CustomCallConstrainedAlias) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
Arg_1 = f32[2,5,5]{2,1,0} parameter(1)
Arg_2 = f32[2,5,5]{2,1,0} parameter(2)
dot.0 = f32[2,5,5]{2,1,0} dot(Arg_1, Arg_2), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={2}, operand_precision={highest,highest}
custom-call.0 = (f32[2,5,5]{1,2,0}, s8[16]{0}, s8[16]{0}) custom-call(Arg_0, dot.0), custom_call_target="dummy_call", operand_layout_constraints={f32[2,5,5]{1,2,0}, f32[2,5,5]{1,2,0}}, output_to_operand_aliasing={{0}: (1, {})}
ROOT get-tuple-element.0 = f32[2,5,5]{1,2,0} get-tuple-element(custom-call.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
auto expect_layout = [](const Shape& shape,
absl::Span<const int64_t> minor_to_major) {
const Layout expected = LayoutUtil::MakeLayout(minor_to_major);
EXPECT_TRUE(LayoutUtil::Equal(shape.layout(), expected))
<< "Expected layout " << expected << ", actual " << shape.layout();
};
expect_layout(ShapeUtil::GetSubshape(call_0->shape(), {0}), {1, 2, 0});
expect_layout(call_0->operand(0)->shape(), {1, 2, 0});
expect_layout(call_0->operand(1)->shape(), {1, 2, 0});
}
TEST_F(LayoutAssignmentTest, MoveToHostCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToHost"
ROOT custom-call.1 = f32[2,5,5]{2,1,0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, MoveToDeviceCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToDevice"
ROOT custom-call.1 = f32[2,5,5]{2,1,0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, ConvCuDNNF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
    GTEST_SKIP() << "FP8 convolutions require HOPPER or newer architecture.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0 = f8e4m3fn[1,64,64,16]{3,2,1,0} parameter(0)
Arg_1 = f8e4m3fn[3,3,16,32]{3,2,1,0} parameter(1)
ROOT conv = f8e4m3fn[1,64,64,32]{3,2,1,0} convolution(Arg_0, Arg_1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNBF16) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv with Bfloat16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = bf16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = bf16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = bf16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(1, 64, 64, 16) rhs_shape=(3, 3, 16, 32) precision=None preferred_element_type=None]" source_file="/usr/local/lib/python3.8/dist-packages/flax/linen/linear.py" source_line=438}
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNFP16) {
if (!GetCudaComputeCapability().IsAtLeast(se::CudaComputeCapability::VOLTA)) {
GTEST_SKIP() << "Conv with FP16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = f16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = f16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = f16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayout) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,64,1024,32,128]{4,3,2,1,0} parameter(0)
negate = c64[512,64,1024,32,128]{4,3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1,3}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({3, 1, 4, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayoutDivisorOfWarpSize) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,16,1024,128]{3,2,1,0} parameter(0)
negate = c64[512,16,1024,128]{3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({1, 3, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, SendRcvLayout) {
const char* hlo = R"(
HloModule Module
condition {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
ROOT lt = pred[] constant(1)
}
body {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
t1 = f32[100,100] get-tuple-element(p), index=0
t = (f32[100,100], u32[], token[]) get-tuple-element(p), index=1
sdone = token[] send-done(t), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
tk = token[] after-all()
rcvd = (f32[100,100]{0,1}, u32[], token[]) recv(tk), channel_id=2
zz = (f32[100,100]{0,1}, token[]) recv-done(rcvd), channel_id=2
rcvd_d = get-tuple-element(zz), index=0
snd = (f32[100,100]{0,1}, u32[], token[]) send(t1, tk), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
a = add(t1, t1)
b = add(rcvd_d, a)
ROOT tup = tuple(b, snd)
}
ENTRY %main {
p0 = f32[100,100] parameter(0)
tk = token[] after-all()
snd = (f32[100,100]{0,1}, u32[], token[]) send(p0, tk), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
t = tuple(p0, snd)
ROOT loop = while(t), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
RunAndFilecheckHloRewrite(
hlo,
GpuLayoutAssignment{&computation_layout, GetGpuComputeCapability(),
GetDnnVersion()},
R"(
)");
}
}
}
} | 2,035 |
#ifndef XLA_SERVICE_GPU_SPLIT_K_GEMM_REWRITER_H_
#define XLA_SERVICE_GPU_SPLIT_K_GEMM_REWRITER_H_
#include <cstdint>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/matmul_utils.h"
namespace xla {
namespace gpu {
bool HasDivisibleSuffixAllowingSplit(absl::Span<int64_t const> span,
int64_t divisor);
absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,
const TritonGemmConfig& config);
}
}
#endif
#include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <cmath>
#include <cstdint>
#include <iterator>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span,
const int64_t divisor) {
CHECK_GE(divisor, 1);
int64_t product = 1;
for (auto it = span.crbegin(); it != span.crend(); ++it) {
product *= *it;
if (product % divisor == 0) {
return true;
}
if (divisor % product != 0) {
return false;
}
}
return false;
}
namespace {
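// Copies `source` into `destination`, incrementing every element that is
// >= `threshold` by one. Used to renumber dimension indices after a new
// split-K dimension has been inserted at position `threshold`.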
void CopyIncrementingAboveThreshold(
const tsl::protobuf::RepeatedField<int64_t>& source,
tsl::protobuf::RepeatedField<int64_t>& destination, const int threshold) {
destination.Reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.Add(x);
}
}
void CopyIncrementingAboveThreshold(absl::Span<const int64_t> source,
DimensionVector& destination,
const int threshold) {
destination.reserve(source.size());
for (int64_t x : source) {
if (x >= threshold) {
++x;
}
destination.push_back(x);
}
}
absl::Status UncompilableMatmul(absl::string_view explanation) {
absl::Status s = absl::CancelledError(explanation);
s.SetPayload(kUncompilableFusion, absl::Cord(explanation));
return s;
}
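// Bitcasts the sparsity metadata operand of `dot` so that its minor-most
// dimension is split into [split_k, original_size / split_k], mirroring the
// split applied to the LHS operand.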
absl::StatusOr<HloInstruction*> MakeSparseMetaOperand(
HloDotInstruction& dot, const TritonGemmConfig& config) {
CHECK_EQ(dot.sparse_operands(), 1);
CHECK_EQ(dot.sparsity().front().index(), 0);
HloInstruction* meta = dot.mutable_operand(2);
const Shape& shape = meta->shape();
if (shape.dimensions().back() % config.split_k != 0) {
return UncompilableMatmul("Sparsity metadata has incorrect shape.");
}
std::vector<int64_t> dimensions(shape.dimensions().begin(),
shape.dimensions().end() - 1);
dimensions.push_back(config.split_k);
dimensions.push_back(shape.dimensions().back() / config.split_k);
Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout(
shape.element_type(), dimensions);
return MakeBitcastHlo(meta, new_shape);
}
}
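// Bitcasts dot operand `operand_number` so that its contracting dimension is
// split into [split_k, k / split_k]; when k is not divisible by split_k the
// dimension is zero-padded first. Fails for fragmentations of the contracting
// dimension that the rewrite cannot handle.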
absl::StatusOr<HloInstruction*> MakeSplitKOperand(
HloInstruction& dot, const TritonFusionAnalysis& analysis,
const TritonGemmConfig& config, const int64_t contracting_dim_idx,
const int operand_number) {
HloInstruction* operand = dot.mutable_operand(operand_number);
const int64_t k = operand->shape().dimensions(contracting_dim_idx);
const bool need_padding = k % config.split_k != 0;
TritonFusionAnalysis::Scope scope = (operand_number == 0)
? TritonFusionAnalysis::Scope::LHS
: TritonFusionAnalysis::Scope::RHS;
auto check_if_supported = [&](const HloInstruction& hlo,
bool check_divisibility) {
const TensorIterationSpec::DimIterationSpec* spec =
analysis.IterSpec(scope, &hlo, contracting_dim_idx);
if (spec == nullptr) {
return absl::OkStatus();
}
if (spec->size() != 1) {
return UncompilableMatmul("Unsupported case.");
}
const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0);
if (fragment.is_sliced()) {
return UncompilableMatmul(
"Sliced contracting dimension is not supported yet.");
}
if (check_divisibility && !HasDivisibleSuffixAllowingSplit(
fragment.subfragments, config.split_k)) {
return UncompilableMatmul("Contracting dimension is too fragmented.");
}
if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) {
return UncompilableMatmul(
"Too small divisible part of the contracting dimension.");
}
return absl::OkStatus();
};
  TF_RETURN_IF_ERROR(
      check_if_supported(*operand, /*check_divisibility=*/!need_padding));
for (const HloInstruction* param : analysis.ScopeParameters(scope)) {
    TF_RETURN_IF_ERROR(
        check_if_supported(*param, /*check_divisibility=*/!need_padding));
}
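  // Zero-pad the contracting dimension up to a multiple of split_k; the extra
  // zeros do not change the result of the subsequent summation.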
if (need_padding) {
HloInstruction* const zero =
dot.parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(operand->shape().element_type())));
PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank());
padding_config.mutable_dimensions(contracting_dim_idx)
->set_edge_padding_high(config.split_k - k % config.split_k);
TF_ASSIGN_OR_RETURN(HloInstruction * pad,
MakePadHlo(operand, zero, padding_config));
*pad->mutable_shape()->mutable_layout() = operand->shape().layout();
operand = pad;
}
CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k);
const Shape& shape = operand->shape();
Shape new_shape(shape.element_type(), {}, {}, {});
for (int i = 0; i < shape.rank(); ++i) {
const int64_t dimension_size = shape.dimensions(i);
if (i == contracting_dim_idx) {
new_shape.add_dimensions(config.split_k);
new_shape.add_dimensions(dimension_size / config.split_k);
} else {
new_shape.add_dimensions(dimension_size);
}
}
Layout* new_layout = new_shape.mutable_layout();
for (int64_t logical_dim_idx : shape.layout().minor_to_major()) {
if (logical_dim_idx >= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx + 1);
}
if (logical_dim_idx <= contracting_dim_idx) {
new_layout->add_minor_to_major(logical_dim_idx);
}
}
return MakeBitcastHlo(operand, new_shape);
}
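// Rewrites the dot in `computation` (and every instruction on the path from
// the dot to the root) to carry an extra major batch dimension of size
// config.split_k holding the partial products.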
absl::Status MakeDotComputationSplitKBatch(
HloComputation* computation, const TritonGemmConfig& config,
bool disable_reduced_precision_reduction) {
HloDotInstruction* dot = Cast<HloDotInstruction>(
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot));
TF_ASSIGN_OR_RETURN(const auto analysis,
TritonFusionAnalysis::Execute(*computation));
const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers();
DotDimensionNumbers new_dim_numbers;
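  // The split-K dimension takes the old contracting index and becomes a batch
  // dimension; every dimension index at or above it shifts up by one.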
TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx,
ContractingDimensionIndex(*dot, 0));
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_contracting_dimensions(),
*new_dim_numbers.mutable_lhs_contracting_dimensions(),
lhs_contracting_idx);
new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.lhs_batch_dimensions(),
*new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx);
TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx,
ContractingDimensionIndex(*dot, 1));
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_contracting_dimensions(),
*new_dim_numbers.mutable_rhs_contracting_dimensions(),
rhs_contracting_idx);
new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx);
CopyIncrementingAboveThreshold(
old_dim_numbers.rhs_batch_dimensions(),
*new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx);
if (dot->sparse_operands()) {
if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) {
return UncompilableMatmul("Sparsity is only supported on left operand.");
}
}
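  // Collect the single-user chain from the dot to the fusion root; every
  // operation on it must be distributive over addition for the later
  // summation of partial results to be valid.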
std::stack<HloInstruction*> to_process;
absl::flat_hash_set<HloInstruction*> to_process_set;
HloInstruction* current = dot;
do {
to_process.push(current);
CHECK(to_process_set.insert(current).second);
if (current->users().empty()) {
break;
}
CHECK_EQ(current->user_count(), 1);
current = current->users()[0];
if (!legacy_triton::IsDistributiveOverAddition(*current)) {
return Cancelled("Operation non-distributive over addition after dot.");
}
} while (true);
bool did_pad = false;
while (!to_process.empty()) {
HloInstruction* current = to_process.top();
to_process.pop();
HloInstruction* expanded;
if (current == dot) {
TF_ASSIGN_OR_RETURN(
HloInstruction * lhs,
MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * rhs,
MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1));
if (lhs->operand(0)->opcode() == HloOpcode::kPad) {
CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad);
did_pad = true;
}
std::vector<SparsityDescriptor> sparsity(dot->sparsity().begin(),
dot->sparsity().end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
sparsity[i].set_dimension(sparsity[i].dimension() + 1);
TF_ASSIGN_OR_RETURN(sparse_meta[i],
MakeSparseMetaOperand(*dot, config));
}
expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(),
dot->shape().element_type(), sparsity, sparse_meta)
.value();
expanded->mutable_shape()->mutable_layout()->clear_minor_to_major();
CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(),
*expanded->mutable_shape()
->mutable_layout()
->mutable_minor_to_major(),
0);
expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0);
dot->SetupDerivedInstruction(expanded);
} else {
expanded = computation->AddInstruction(current->CloneWithNewShape(
ShapeUtil::PrependMajorDimension(config.split_k, current->shape())));
if (expanded->opcode() == HloOpcode::kTranspose) {
const auto* old_transpose = Cast<HloTransposeInstruction>(current);
auto* new_transpose = Cast<HloTransposeInstruction>(expanded);
new_transpose->mutable_dimensions()->clear();
new_transpose->mutable_dimensions()->reserve(
new_transpose->shape().rank());
new_transpose->mutable_dimensions()->push_back(0);
for (const int64_t dim : old_transpose->dimensions()) {
new_transpose->mutable_dimensions()->push_back(dim + 1);
}
}
}
TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(current));
if (current == dot) {
continue;
}
for (int i = 0; i < expanded->operands().size(); ++i) {
HloInstruction* operand = expanded->mutable_operand(i);
if (!to_process_set.contains(operand)) {
std::vector<int64_t> broadcast_dimensions(operand->shape().rank());
absl::c_iota(broadcast_dimensions, 1);
TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape(
i, MakeBroadcastHlo(operand, broadcast_dimensions,
ShapeUtil::PrependMajorDimension(
config.split_k, operand->shape()))));
}
}
}
if (disable_reduced_precision_reduction) {
PrimitiveType output_type =
computation->root_instruction()->shape().element_type();
PrimitiveType accumulator_type = output_type == PrimitiveType::F64
? PrimitiveType::F64
: PrimitiveType::F32;
computation->root_instruction()->mutable_shape()->set_element_type(
accumulator_type);
}
if (did_pad) {
TF_RETURN_IF_ERROR(
TritonFusionAnalysis::Execute(*computation, config.split_k).status());
}
return absl::OkStatus();
}
absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,
const TritonGemmConfig& config) {
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (dot_fusion->shape().IsTuple()) {
return Unimplemented("Tuple output is not supported with split-K yet.");
}
const bool disable_reduced_precision_reduction =
dot_fusion->GetModule()
->config()
.debug_options()
.xla_gpu_triton_gemm_disable_reduced_precision_reduction();
const PrimitiveType output_type = dot_fusion->shape().element_type();
const Layout output_layout = dot_fusion->shape().layout();
TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch(
dot_fusion->fused_instructions_computation(), config,
disable_reduced_precision_reduction));
const HloInstruction* root = dot_fusion->fused_expression_root();
*dot_fusion->mutable_shape() = root->shape();
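  // Sum the partial products over the new major split-K dimension
  // (dimension 0) outside the fusion.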
HloInstruction* zero =
dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(root->shape().element_type())));
TF_ASSIGN_OR_RETURN(HloInstruction * reduce,
MakeReduceHlo(dot_fusion, zero, {0},
HloOpcode::kAdd, &dot_fusion->metadata()));
*reduce->mutable_shape()->mutable_layout() = output_layout;
if (dot_fusion->IsRoot()) {
    dot_fusion->parent()->set_root_instruction(reduce,
                                               /*accept_different_shape=*/true);
} else {
TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce));
}
if (disable_reduced_precision_reduction) {
HloInstruction* convert = MakeConvertToHlo(reduce, output_type);
if (reduce->IsRoot()) {
      reduce->parent()->set_root_instruction(convert,
                                             /*accept_different_shape=*/true);
} else {
TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert));
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/split_k_gemm_rewriter.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
namespace m = ::xla::match;
TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) {
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6));
EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2));
EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2));
}
using SplitKTest = HloTestBase;
TEST_F(SplitKTest, MakeSplitK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm",
metadata={op_name="foo"}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
EXPECT_EQ(root->metadata().op_name(), "foo");
}
TEST_F(SplitKTest, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = f32[480,16]{1,0} convert(d)
ROOT s = f32[480,16]{1,0} tanh(c)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = f32[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Operation non-distributive over addition after dot.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) {
constexpr absl::string_view kHloText = R"(
t {
c1 = s32[] constant(1)
bc1 = s32[31]{0} broadcast(c1), dimensions={}
p0 = s32[31]{0} parameter(0)
cmp = pred[31]{0} compare(bc1, p0), direction=EQ
cvt = f32[31]{0} convert(cmp)
bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1}
c0 = f32[] constant(0)
bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={}
ROOT dot = f32[31,16]{1,0} dot(bc2, bc0),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s32[31]{0} parameter(0)
ROOT r = f32[31,16]{1,0} fusion(p0),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) {
const std::string hlo_text = R"(
t {
p0 = f16[32,1234] parameter(0)
s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]}
p1 = f16[256,768] parameter(1)
ROOT d = f16[32,768] dot(s0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[32,1234] parameter(0)
p1 = f16[256,768] parameter(1)
ROOT r = f16[32,768] fusion(p0, p1),
kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 2, 1, 2);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Sliced contracting dimension is not supported yet.")));
}
TEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) {
const std::string kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(),
Layout({0, 1}));
}
TEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) {
const std::string hlo_text = R"(
HloModule m
triton_gemm_dot.24 {
parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1)
convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3)
parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0)
bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0)
ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2),
lhs_batch_dims={1}, lhs_contracting_dims={0},
rhs_batch_dims={0}, rhs_contracting_dims={2}
}
ENTRY e {
tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0)
tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0),
kind=kCustom, calls=triton_gemm_dot.24,
backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 64, 64, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
}
TEST_F(SplitKTest, SupportsIndivisible) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,129]{1,0} reshape(copy.1)
convert.8 = bf16[480,129]{1,0} convert(reshape.5)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{1,0} parameter(0)
convert_0 = bf16[480,129]{1,0} convert(parameter_0)
parameter_1 = bf16[16,129]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{1,0} parameter(0)
p1 = bf16[16,129]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,129]{0,1} parameter(0)
convert_0 = bf16[480,129]{0,1} convert(parameter_0)
parameter_1 = bf16[16,129]{0,1} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,129]{0,1} parameter(0)
p1 = bf16[16,129]{0,1} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), kConfig));
  TF_EXPECT_OK(HloVerifier(/*layout_sensitive=*/true,
                           /*allow_mixed_precision=*/true,
                           LayoutAssignment::InstructionCanChangeLayout)
                   .Run(module.get())
                   .status());
}
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithTranspose) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[480,255]{1,0} parameter(0)
convert_0 = bf16[480,255]{1,0} convert(parameter_0)
transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0}
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[480,255]{1,0} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportIndivisibleWithBroadcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[] parameter(0)
convert_0 = bf16[] convert(parameter_0)
broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SupportsIndivisibleWithBitcast) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0)
bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0)
parameter_1 = bf16[16,255]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
p1 = bf16[16,255]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 16, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
}
TEST_F(SplitKTest, SkipSmallK) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,64]{1,0} reshape(copy.1)
convert.8 = bf16[480,64]{1,0} convert(reshape.5)
parameter_1 = bf16[16,64]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,64]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 128, 4, 1, 4);
EXPECT_THAT(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config),
tsl::testing::StatusIs(
tsl::error::CANCELLED,
"Too small divisible part of the contracting dimension."));
}
TEST_F(SplitKTest, FragmentedKSupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[7,2,16,4,20] parameter(0)
t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0}
b0 = f16[2560,7] bitcast(t0)
a1 = f16[2560,5] parameter(1)
ROOT r = f16[7,5] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[7,2,16,4,20] parameter(0)
p1 = f16[2560,5] parameter(1)
ROOT fusion = f16[7,5] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(32, 32, 16, 1, 1, 4);
config.split_k = 5;
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
config.split_k = 8;
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation, config.split_k));
EXPECT_EQ(dot_computation->root_instruction()->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5}));
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
      ElementsAre(FieldsAre(/*stride=*/1, /*count=*/2560, /*slice_start=*/0,
                            /*sliced_count=*/2560,
                            /*subfragments=*/ElementsAre(20, 4, 4, 4, 2))));
}
TEST_F(SplitKTest, FragmentedKUnsupported) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f32[3,128,77] parameter(0)
b0 = f32[384,77] bitcast(p0)
a1 = f32[384,25] parameter(1)
ROOT r = f32[77,25] dot(b0, a1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[3,128,77] parameter(0)
p1 = f32[384,25] parameter(1)
ROOT fusion = f32[77,25] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
EXPECT_THAT(
MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
config),
tsl::testing::StatusIs(tsl::error::CANCELLED,
"Contracting dimension is too fragmented."));
}
TEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) {
const std::string kHloText = R"(
triton_gemm_dot.4842_computation {
parameter_0 = bf16[96,96]{1,0} parameter(0)
parameter_1 = bf16[96,7]{1,0} parameter(1)
dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bitcast.2 = bf16[7,3,32]{2,1,0} bitcast(dot.0)
}
ENTRY e {
parameter_0.91 = bf16[96,96]{1,0} parameter(0)
parameter_1.86 = bf16[96,7]{1,0} parameter(1)
ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0}
fusion(parameter_0.91, parameter_1.86), kind=kCustom,
calls=triton_gemm_dot.4842_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 2, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduce);
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
}
TEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=L.2@2:4
}
ENTRY e {
lhs = f16[2,5,1600] parameter(0)
rhs = f16[2,3200,10] parameter(1)
meta = u16[2,5,200] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
HloInstruction* dot =
module->GetComputationWithName("triton_gemm")->root_instruction();
EXPECT_EQ(dot->operand(0)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400}));
EXPECT_EQ(dot->operand(1)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10}));
EXPECT_EQ(dot->operand(2)->shape(),
ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50}));
}
TEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) {
const std::string hlo_text = R"(
HloModule test
triton_gemm {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=R.1@2:4
}
ENTRY e {
lhs = f16[2,5,3200] parameter(0)
rhs = f16[2,1600,10] parameter(1)
meta = u16[2,200,10] parameter(2)
ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 1);
auto result = MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config);
EXPECT_FALSE(result.ok());
}
class SplitKTestWithMorePreciseReduction
: public HloTestBase,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
triton_gemm_dot {
parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
reshape.5 = s8[480,128]{1,0} reshape(copy.1)
convert.8 = bf16[480,128]{1,0} convert(reshape.5)
parameter_1 = bf16[16,128]{1,0} parameter(1)
ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
p1 = bf16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) {
const std::string hlo_text = R"(
HloModule t
triton_gemm_dot {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
d = f16[480,16]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
c = bf16[] constant(123)
n = bf16[] negate(c)
bc = bf16[480,16]{1,0} broadcast(n)
cv = bf16[480,16]{1,0} convert(d)
ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}
ENTRY e {
p0 = f16[480,128]{1,0} parameter(0)
p1 = f16[16,128]{1,0} parameter(1)
ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 16, 16, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}
TEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) {
const std::string hlo_text = R"(
triton_gemm_dot {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
d = f16[8,288,32]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2}
}
ENTRY e {
p0 = f16[8,288,288]{2,1,0} parameter(0)
p1 = f16[8,288,32]{2,0,1} parameter(1)
ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 32, 8, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
const auto* transpose =
Cast<HloTransposeInstruction>(module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation()
->root_instruction());
EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3));
}
TEST_F(SplitKTest, MakeSplitKWithTrivialDimension) {
const std::string hlo_text = R"(
triton_gemm_dot {
parameter_0 = f32[1001,1]{1,0} parameter(0)
parameter_1 = f32[1001,2048]{1,0} parameter(1)
ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY %entry_computation {
p0 = f32[1001,1]{1,0} parameter(0)
p1 = f32[1001,2048]{1,0} parameter(1)
ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_gemm_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TritonGemmConfig config(16, 128, 64, 4, 1, 4);
TF_EXPECT_OK(MakeDotSplitKBatch(
module->entry_computation()->root_instruction(), config));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reduce(m::Fusion(), m::Constant())));
}
}
}
} | 2,036 |
#ifndef XLA_SERVICE_GPU_GPU_ALGEBRAIC_SIMPLIFIER_H_
#define XLA_SERVICE_GPU_GPU_ALGEBRAIC_SIMPLIFIER_H_
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla::gpu {
class GpuAlgebraicSimplifierVisitor : public AlgebraicSimplifierVisitor {
public:
explicit GpuAlgebraicSimplifierVisitor(
const AlgebraicSimplifierOptions& options,
se::GpuComputeCapability compute_capability,
AlgebraicSimplifier* simplifier)
: AlgebraicSimplifierVisitor(options, simplifier),
compute_capability_(std::move(compute_capability)) {}
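  // Unlike the base visitor, avoids strength-reducing dots that the Triton
  // GEMM emitter can handle on this GPU, unless the dot is a vector-vector
  // product or too small for the GEMM rewriters to be worthwhile.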
bool ShouldStrengthReduceDotToReduce(const HloInstruction* hlo) override;
private:
se::GpuComputeCapability compute_capability_;
};
class GpuAlgebraicSimplifier : public AlgebraicSimplifier {
public:
explicit GpuAlgebraicSimplifier(const AlgebraicSimplifierOptions& options,
se::GpuComputeCapability compute_capability)
: AlgebraicSimplifier(options),
compute_capability_(std::move(compute_capability)) {}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
XLA_VLOG_LINES(
2, "GpuAlgebraicSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
GpuAlgebraicSimplifierVisitor visitor(options_, compute_capability_, this);
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (visitor.Run(comp, options_, this)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "GpuAlgebraicSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
private:
se::GpuComputeCapability compute_capability_;
};
}
#endif
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
bool GpuAlgebraicSimplifierVisitor::ShouldStrengthReduceDotToReduce(
const HloInstruction* hlo) {
if (!options_.enable_dot_strength_reduction()) {
return false;
}
const HloDotInstruction* dot = DynCast<HloDotInstruction>(hlo);
if (dot == nullptr) {
return false;
}
const HloInstruction* lhs = dot->operand(0);
const HloInstruction* rhs = dot->operand(1);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
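  // An operand whose dimensions are all batch or contracting dimensions is
  // effectively a vector; a vector-vector dot is always worth reducing.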
bool lhs_is_vector = (dnums.lhs_batch_dimensions_size() +
dnums.lhs_contracting_dimensions_size() ==
lhs->shape().rank());
bool rhs_is_vector = (dnums.rhs_batch_dimensions_size() +
dnums.rhs_contracting_dimensions_size() ==
rhs->shape().rank());
if (lhs_is_vector && rhs_is_vector) {
return true;
}
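  // Keep the dot if it is large enough for the GEMM rewriters and Triton can
  // handle it; otherwise strength-reduce it to multiply + reduce.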
  absl::StatusOr<bool> is_too_small =
      IsMatrixMultiplicationTooSmallForRewriting(*hlo, /*threshold=*/1000000);
CHECK_OK(is_too_small.status());
if (is_too_small.value()) {
return true;
}
return !legacy_triton::CanTritonHandleGEMM(*dot, compute_capability_);
}
} | #include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class GpuAlgebraicSimplifierTest : public HloTestBase {};
TEST_F(GpuAlgebraicSimplifierTest, VectorVectorDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 500] parameter(0)
p1 = f32[32, 500] parameter(1)
ROOT dot = f32[32] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, MatrixVectorDotShouldNotBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 5000, 7000] parameter(0)
p1 = f32[32, 5000] parameter(1)
ROOT dot = f32[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_FALSE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest,
DotWithTypeUnsupportedByGemmFusionShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = c64[32, 5000, 7000] parameter(0)
p1 = c64[32, 5000] parameter(1)
ROOT dot = c64[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 50, 70] parameter(0)
p1 = f32[32, 50] parameter(1)
ROOT dot = f32[32,70] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
}
} | 2,037 |
#ifndef XLA_SERVICE_GPU_GPU_REDUCE_SCATTER_CREATOR_H_
#define XLA_SERVICE_GPU_GPU_REDUCE_SCATTER_CREATOR_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
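// Rewrites an all-reduce whose result is consumed by a dynamic-slice that
// selects each participant's own shard into an equivalent reduce-scatter.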
class ReduceScatterCreator : public HloModulePass {
public:
ReduceScatterCreator() = default;
absl::string_view name() const override { return "reduce-scatter-creator"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_opt_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> ReduceScatterCreator::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
const HloModuleConfig &config = module->config();
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kAllReduce) {
continue;
}
auto *ar = Cast<HloAllReduceInstruction>(instruction);
      auto ar_spec = MatchReduceScatter(ar, config.num_partitions(),
                                        config.replica_count(),
                                        /*allow_multiple_split_dims=*/false,
                                        /*allow_intervening_reshape=*/true);
if (!ar_spec) {
VLOG(2) << "Cannot match reduce-scatter " << ar->ToString();
continue;
}
HloInstruction *ds = ar_spec->dynamic_slice;
const int64_t split_dim = ar_spec->split_dim;
Shape scatter_shape = ar->shape();
const int64_t split_dim_size = scatter_shape.dimensions(split_dim);
HloInstruction *rs_input = ar->mutable_operand(0);
const int64_t scatter_dim_size = split_dim_size / ar_spec->group_size;
TF_RET_CHECK(scatter_dim_size * ar_spec->group_size <= split_dim_size);
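      // If the scattered dimension is not evenly divisible by the group size,
      // slice the input down to the largest divisible extent first.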
if (split_dim_size % ar_spec->group_size != 0) {
scatter_shape.set_dimensions(split_dim,
scatter_dim_size * ar_spec->group_size);
rs_input = computation->AddInstruction(HloInstruction::CreateSlice(
scatter_shape, rs_input,
std::vector<int64_t>(scatter_shape.rank(), 0),
scatter_shape.dimensions(),
std::vector<int64_t>(scatter_shape.rank(), 1)));
}
scatter_shape.set_dimensions(split_dim, scatter_dim_size);
std::optional<int64_t> channel_id;
if (ar->channel_id()) {
channel_id = next_channel_id++;
}
HloInstruction *ars =
computation->AddInstruction(HloInstruction::CreateReduceScatter(
scatter_shape, {rs_input}, ar->to_apply(), ar->device_list(),
ar->constrain_layout(), channel_id, ar->use_global_device_ids(),
ar_spec->split_dim));
HloInstruction *result = ars;
HloInstruction *reshape = nullptr;
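      // If a reshape sits between the all-reduce and the dynamic-slice, replay
      // it on the reduce-scatter output so that the slice's users still see
      // the shape they expect.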
if (ds->operand(0) != ar) {
reshape = ds->mutable_operand(0);
result = computation->AddInstruction(
HloInstruction::CreateReshape(ds->shape(), result));
}
TF_RETURN_IF_ERROR(ds->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ds));
if (reshape) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(reshape));
}
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ar));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuReduceScatterCreatorTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
    HloModuleConfig config = GetModuleConfigForTest(
        /*replica_count=*/num_replicas, /*num_partitions=*/num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = ReduceScatterCreator().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule> &module) {
return CollectiveCount(module, HloOpcode::kAllReduce);
}
  size_t ReduceScatterCount(std::unique_ptr<HloModule> &module) {
    return CollectiveCount(module, HloOpcode::kReduceScatter);
  }
private:
size_t CollectiveCount(std::unique_ptr<HloModule> &module, HloOpcode opcode) {
return absl::c_count_if(
module->entry_computation()->instructions(),
[&opcode](HloInstruction *instr) { return instr->opcode() == opcode; });
}
};
TEST_F(GpuReduceScatterCreatorTest, AllReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithOffsetReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%slice_size = s32[1] constant({4})
%offset = s32[1] multiply(%id, %slice_size)
%reshape = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %reshape, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshape) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[32,16,64] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,16,64] dynamic-slice(%reshape.1, %offset, %zero, %zero),
dynamic_slice_sizes={4,16,64}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshapeSplitDimModified) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[336,1024] parameter(0)
%all-reduce = f32[336,1024] all-reduce(%param), replica_groups={}, to_apply=%sum
%rid = u32[] replica-id()
%id = s32[] convert(%rid)
%slice_size = s32[] constant(128)
%offset = s32[] multiply(%id, %slice_size)
%zero = s32[] constant(0)
%reshape.1 = f32[4,84,1024] reshape(%all-reduce)
ROOT %dynamic-slice = f32[4,84,128] dynamic-slice(%reshape.1, %zero, %zero, %offset),
dynamic_slice_sizes={4,84,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0)))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasDim2) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%rid = u32[] replica-id()
%rid_s32 = s32[] convert(%rid)
%slice_size = s32[] constant(16)
%offset = s32[] multiply(%rid_s32, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[32,8,16] dynamic-slice(%all-reduce, %zero, %zero, %offset),
dynamic_slice_sizes={32,8,16}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
const auto *rs = Cast<HloReduceScatterInstruction>(
module->entry_computation()->root_instruction());
EXPECT_EQ(rs->scatter_dimension(), 2) << rs->ToString();
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasWrongOffsets) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} constant({0,1,2,3,4,5,6,8})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/false));
}
TEST_F(GpuReduceScatterCreatorTest, AllReplicasIotaTable) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={}, to_apply=%sum
%table = s32[8]{0} iota(), iota_dimension=0
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/8,
                           /*num_partitions=*/2, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupedReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%rid = u32[] replica-id()
%id = s32[1] dynamic-slice(%gtable, %rid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/8,
                           /*num_partitions=*/2, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllPartitions) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0},{1}}, to_apply=%sum, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/2,
                           /*num_partitions=*/8, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, AllReduceFollowedByAllReduce) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce.scattered = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{0,1,2,3,4,5,6,7},{8,9,10,11,12,13,14,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=1
%table = s32[8]{0} constant({0,1,2,3,4,5,6,7})
%pid = u32[] partition-id()
%id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%id)
%slice_size = s32[] constant(4)
%offset = s32[] multiply(%reshape, %slice_size)
%zero = s32[] constant(0)
%dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce.scattered, %offset, %zero, %zero),
dynamic_slice_sizes={4,8,128}
ROOT %all-reduce.sync = f32[4,8,128]{2,1,0} all-reduce(%dynamic-slice),
replica_groups={{0,8},{1,9},{2,10},{3,11},{4,12},{5,13},{6,14},{7,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=2
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/2,
                           /*num_partitions=*/8, /*expect_change=*/true));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_EQ(ReduceScatterCount(module), 1);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobals) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%rid = u32[] replica-id()
%pcount = u32[] constant(4)
%ridxp = u32[] multiply(%rid, %pcount)
%gid = u32[] add(%ridxp, %pid)
%gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3})
%id = s32[1] dynamic-slice(%gtable, %gid), dynamic_slice_sizes={1}
%reshape.0 = s32[] reshape(%id)
%table = s32[4]{0} constant({0,8,16,24})
%offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1}
%reshape.1 = s32[] reshape(%offset)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/2,
                           /*num_partitions=*/4, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{5,7,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/2,
                           /*num_partitions=*/4, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Parameter(0))));
EXPECT_EQ(AllReduceCount(module), 0);
}
TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsNonOrthogonalReplicas) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[32,8,128]{2,1,0} parameter(0)
%all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param),
replica_groups={{1,3,2,0},{7,5,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[4]{0} constant({3,0,2,1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(8)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero),
dynamic_slice_sizes={8,8,128}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/2,
                           /*num_partitions=*/4, /*expect_change=*/false));
}
TEST_F(GpuReduceScatterCreatorTest, NonUniformSplit) {
absl::string_view hlo_string = R"(
HloModule AllReduce
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %AllReduce {
%param = f32[1,7]{1,0} parameter(0)
%all-reduce = f32[1,7]{1,0} all-reduce(%param),
replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true
%pid = u32[] partition-id()
%pid_table = s32[8]{0} constant({0, 1, 0, 1, 0, 1, 0, 1})
%offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1}
%reshape = s32[] reshape(%offset)
%shard_size = s32[] constant(3)
%mul = s32[] multiply(%reshape, %shard_size)
%zero = s32[] constant(0)
ROOT %dynamic-slice = f32[1,3] dynamic-slice(%all-reduce, %zero, %mul),
dynamic_slice_sizes={1,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*num_replicas=*/1,
                           /*num_partitions=*/8, /*expect_change=*/true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::ReduceScatter(m::Slice(m::Parameter(0)))));
}
}
}
} | 2,038 |
#ifndef XLA_SERVICE_GPU_PRIORITY_FUSION_H_
#define XLA_SERVICE_GPU_PRIORITY_FUSION_H_
#include <stdint.h>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
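// Priority-based instruction fusion pass for the GPU backend. Producers are
// ordered by the estimated run-time saving of fusing them into all of their
// consumers, and the most profitable producer is fused first.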
class GpuPriorityFusion : public InstructionFusion {
public:
GpuPriorityFusion(tsl::thread::ThreadPool* thread_pool,
const se::DeviceDescription& device,
GpuHloCostAnalysis::Options cost_analysis_options)
: InstructionFusion(GpuPriorityFusion::IsExpensive),
thread_pool_(thread_pool),
device_info_(device),
cost_analysis_options_(std::move(cost_analysis_options)),
fusion_analysis_cache_(device_info_) {}
absl::string_view name() const override { return "priority-fusion"; }
static bool IsExpensive(const HloInstruction& instruction);
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
protected:
std::unique_ptr<FusionQueue> GetFusionQueue(
HloComputation* computation) override;
FusionDecision ShouldFuse(HloInstruction* consumer,
int64_t operand_index) override;
HloInstruction::FusionKind ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) override;
private:
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override;
bool ConsumeFuel(HloInstruction* producer, HloInstruction* consumer);
tsl::thread::ThreadPool* thread_pool_;
se::DeviceDescription device_info_;
GpuHloCostAnalysis::Options cost_analysis_options_;
std::unique_ptr<FusionProcessDumpProto> fusion_process_dump_;
HloFusionAnalysisCache fusion_analysis_cache_;
mlir::MLIRContext mlir_context_;
};
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_PRIORITY_FUSION_H_
#include "xla/service/gpu/priority_fusion.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dump.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
bool ElementIsF32OrF16(const Shape& shape) {
PrimitiveType type = shape.element_type();
return type == F32 || type == F16;
}
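// Returns true if `instr` may participate in fusion: all elementwise ops,
// non-custom fusions, and a fixed allowlist of data-movement and reduction
// ops.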
bool IsFusible(const HloInstruction& instr) {
if (!instr.IsFusible()) {
return false;
}
if (instr.IsElementwise()) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kFusion:
return instr.fusion_kind() != HloInstruction::FusionKind::kCustom;
case HloOpcode::kCopy:
case HloOpcode::kIota:
case HloOpcode::kConstant:
case HloOpcode::kReduce:
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kScatter:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
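// Drives priority fusion for a single computation: owns the cost analysis,
// the producer priority queue, and the memoized fusion decisions.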
class GpuPriorityFusionQueue {
using Priority = int64_t;
  using CanFuseCallback = std::function<FusionDecision(
      HloInstruction*, int64_t)>;
public:
GpuPriorityFusionQueue(
HloComputation* computation,
const GpuHloCostAnalysis::Options& cost_analysis_options,
const se::DeviceDescription* device_info,
FusionProcessDumpProto* fusion_process_dump,
tsl::thread::ThreadPool* thread_pool, mlir::MLIRContext* mlir_context,
HloFusionAnalysisCache& fusion_analysis_cache,
bool triton_softmax_priority_fusion_enabled)
: computation_(computation),
cost_analysis_(cost_analysis_options, device_info),
fusion_process_dump_(fusion_process_dump),
thread_pool_(thread_pool),
mlir_context_(mlir_context),
fusion_analysis_cache_(fusion_analysis_cache),
triton_softmax_priority_fusion_enabled_(
triton_softmax_priority_fusion_enabled) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
TF_CHECK_OK(computation_->Accept(&cost_analysis_));
dump_fusion_visualization_ = computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization();
std::vector<HloInstruction*> instructions;
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->user_count() == 0 || !instruction->IsFusible() ||
instruction->opcode() == HloOpcode::kTuple ||
instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
instructions.push_back(instruction);
}
ComputeAndSetPriorities(instructions);
}
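  // (Re)computes the priorities of `instructions` and reinserts them into the
  // queue; instructions whose priority is negative are kept out of the queue.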
void ComputeAndSetPriorities(
const std::vector<HloInstruction*>& instructions) {
std::vector<Priority> priorities = ComputePriorities(instructions);
for (auto [instruction, priority] : llvm::zip(instructions, priorities)) {
auto key = std::make_pair(priority, instruction->unique_id());
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it != reverse_map_.end()) {
const PriorityQueue::iterator& queue_it = reverse_it->second;
if (key == queue_it->first) {
continue;
}
producer_priority_queue_.erase(queue_it);
reverse_map_.erase(reverse_it);
}
if (priority < 0) {
continue;
}
auto emplace_result = producer_priority_queue_.emplace(key, instruction);
reverse_map_.emplace(instruction, emplace_result.first);
}
}
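  // Computes the priority of each instruction, in parallel when a thread pool
  // is available.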
std::vector<Priority> ComputePriorities(
const std::vector<HloInstruction*>& instructions) {
auto schedule_or_run = [this](std::function<void()> fn) {
if (thread_pool_) {
thread_pool_->Schedule(std::move(fn));
} else {
fn();
}
};
tsl::BlockingCounter counter(instructions.size());
std::vector<Priority> priorities(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
schedule_or_run([&, i] {
priorities[i] = CalculateProducerPriority(instructions[i]);
counter.DecrementCount();
});
}
counter.Wait();
return priorities;
}
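  // Pops the highest-priority producer off the queue and collects its
  // consumers. Returns false once the queue is exhausted.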
bool DequeueNextProducer() {
current_producer_ = nullptr;
current_consumers_.clear();
while (!producer_priority_queue_.empty() && current_consumers_.empty()) {
auto next_it = std::prev(producer_priority_queue_.end());
current_producer_ = next_it->second;
producer_priority_queue_.erase(next_it);
reverse_map_.erase(current_producer_);
current_consumers_ = current_producer_->users();
if (current_producer_->opcode() == HloOpcode::kBitcast) {
llvm::erase_if(current_consumers_, [&](HloInstruction* consumer) {
return !CanFuseCached(current_producer_, consumer);
});
}
}
return !current_consumers_.empty();
}
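  // Re-runs the cost analysis for instructions touched by the last fusion and
  // refreshes their positions in the queue.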
void UpdatePriorities() {
for (auto instruction : to_update_priority_) {
TF_CHECK_OK(cost_analysis_.RevisitInstruction(instruction));
}
ComputeAndSetPriorities(std::vector<HloInstruction*>{
to_update_priority_.begin(), to_update_priority_.end()});
to_update_priority_.clear();
}
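  // Called right before fusing `producer` into `consumer`: dumps the fusion
  // state if requested and drops any cached analyses for the two instructions.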
void PreFusion(HloInstruction* producer, HloInstruction* consumer) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
consumer->name(), "| inside PriorityFusion"),
*consumer, producer);
}
InvalidateCaches(producer);
InvalidateCaches(consumer);
}
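  // Removes all cached fusion decisions and performance estimates involving
  // `instruction`, since its shape or users may be about to change.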
void InvalidateCaches(HloInstruction* instruction) {
can_fuse_cache_.erase(instruction);
for (const HloInstruction* operand : instruction->operands()) {
auto it = can_fuse_cache_.find(operand);
if (it != can_fuse_cache_.end()) {
it->second.erase(instruction);
}
}
gpu_performance_model_cache_.Invalidate(*instruction);
fusion_analysis_cache_.Invalidate(*instruction);
}
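  // Bookkeeping after a fusion: records the step in the process dump, detaches
  // dead instructions, and marks the new fusion and its operands for a
  // priority update.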
void OnFusingInstruction(HloInstruction* fusion,
HloInstruction* original_producer,
HloInstruction* original_consumer) {
if (fusion_process_dump_) {
auto* fusion_step =
fusion_process_dump_->add_fusion_steps()->mutable_fusion();
fusion_step->set_fusion_name(std::string(fusion->name()));
fusion_step->set_producer_name(std::string(original_producer->name()));
fusion_step->set_consumer_name(std::string(original_consumer->name()));
}
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("Fused |", original_producer->name(), "| into |",
fusion->name(), "| inside PriorityFusion"),
*fusion);
}
if (fusion != original_consumer) {
RemoveInstruction(original_consumer);
}
if (original_producer->user_count() == 0) {
original_producer->DetachFromOperandsAndUsers();
}
for (HloInstruction* operand : fusion->operands()) {
if (operand == original_producer ||
operand->opcode() == HloOpcode::kConstant ||
operand->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
if (!operand->IsFusible()) {
continue;
}
to_update_priority_.insert(operand);
}
to_update_priority_.insert(fusion);
}
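  // Removes `instruction` from the queue and from the analysis cache.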
void RemoveInstruction(HloInstruction* instruction) {
to_update_priority_.erase(instruction);
fusion_analysis_cache_.Invalidate(*instruction);
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it == reverse_map_.end()) {
return;
}
producer_priority_queue_.erase(reverse_it->second);
reverse_map_.erase(reverse_it);
}
HloInstruction* current_producer() { return current_producer_; }
const std::vector<HloInstruction*>& current_consumers() {
return current_consumers_;
}
private:
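  // The priority of a producer is the estimated time saved by fusing it into
  // all of its users. Bitcasts always get the maximum priority, constants the
  // minimum, and producers that cannot fuse with every non-bitcast user are
  // excluded via the minimum priority.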
Priority CalculateProducerPriority(HloInstruction* producer) {
if (producer->opcode() == HloOpcode::kBitcast) {
return std::numeric_limits<Priority>::max();
}
if (producer->opcode() == HloOpcode::kConstant) {
return std::numeric_limits<Priority>::min();
}
if (auto fusion_decision = CanFuseWithAllNonBitcastUsers(producer);
!fusion_decision) {
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step = fusion_process_dump_->add_fusion_steps()
->mutable_producer_ineligible();
step->set_producer_name(std::string(producer->name()));
step->set_reason(fusion_decision.Explain());
}
return std::numeric_limits<Priority>::min();
}
GpuPerformanceModel::RunTimes run_times =
GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
producer, &cost_analysis_,
GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_),
producer->users());
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step =
fusion_process_dump_->add_fusion_steps()->mutable_update_priority();
step->set_producer_name(std::string(producer->name()));
for (auto* consumer : producer->users()) {
step->add_consumer_names(std::string(consumer->name()));
}
step->set_us_fused(absl::ToDoubleMicroseconds(run_times.time_fused));
step->set_us_unfused(absl::ToDoubleMicroseconds(run_times.time_unfused));
}
return absl::ToInt64Nanoseconds(run_times.time_unfused -
run_times.time_fused);
}
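  // Checks whether a producer/consumer pair involving a generic Triton fusion
  // can be merged, i.e. whether the combined fusion can still be tiled by
  // SymbolicTileAnalysis.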
FusionDecision CanFuseTriton(HloInstruction* producer,
HloInstruction* consumer) {
if (!triton_softmax_priority_fusion_enabled_) {
return "triton softmax fusion is not enabled";
}
if (IsGenericTritonFusion(*producer)) {
if (!IsFusible(*consumer)) {
return "the consumer is not fusible";
}
} else {
if (!IsFusible(*producer)) {
return "the producer is not fusible";
}
}
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
SymbolicTileAnalysisOrError symbolic_tile_analysis_or =
SymbolicTileAnalysis::AnalyzeFusion(*fusion, mlir_context_);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&symbolic_tile_analysis_or)) {
return {
absl::StrCat("Fusion can not be tiled with SymbolicTileAnalysis: ",
fusion_decision->Explain())};
}
return {};
}
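  // The main eligibility check for fusing `producer` into `consumer`:
  // fusibility of both sides, scatter constraints, reduction heuristics,
  // fusion budget, and code-duplication limits.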
FusionDecision CanFuse(HloInstruction* producer, HloInstruction* consumer) {
if (IsGenericTritonFusion(*producer) || IsGenericTritonFusion(*consumer)) {
return CanFuseTriton(producer, consumer);
}
if (!IsFusible(*producer)) {
return "the producer is not fusible";
}
if (!IsFusible(*consumer)) {
return "the consumer is not fusible";
}
if (consumer->opcode() == HloOpcode::kBitcast) {
return "not fusing into a single bitcast as consumer";
}
if (auto can_fuse = CanEmitInputFusedScatter(*producer, *consumer);
!can_fuse) {
return can_fuse;
}
auto contains_significant_reduce = [&](const HloInstruction* instr) {
auto fusion = HloFusionAdaptor::ForInstruction(instr);
return HloAnyOf(fusion->GetRoots(), *fusion, [](auto node) {
if (!(node.opcode() == HloOpcode::kReduce && node.shape().IsArray())) {
return false;
}
int64_t reduction_size =
ShapeUtil::ElementsIn(node.instruction().operand(0)->shape()) /
ShapeUtil::ElementsIn(node.shape());
return reduction_size >= 16;
});
};
if (contains_significant_reduce(producer) &&
contains_significant_reduce(consumer)) {
return "both the producer and the consumer contain a reduce";
}
const auto& analysis = fusion_analysis_cache_.Get(*producer);
if (analysis.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kReduction) {
const auto& analysis_fused =
fusion_analysis_cache_.Get(*producer, *consumer);
if (analysis_fused.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kLoop) {
return "fusion into output of a reduce fusion would create a loop "
"fusion";
}
}
    if (auto fits_budget = FusionFitsInBudget(
            *consumer, *producer, *cost_analysis_.device_info_,
            /*is_consumer_producer_fusion=*/true);
        !fits_budget) {
return fits_budget;
}
if (cost_analysis_.ProducerConsumerMergedTooLarge(*producer, *consumer)) {
return "the fusion would result in an overly large code duplication";
}
if (producer == producer->parent()->root_instruction()) {
return "not fusing into the output of the root instruction";
}
return InstructionFusion::ShouldFuseInPlaceOp(producer, consumer);
}
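  // Memoized wrapper around CanFuse(); entries are dropped by
  // InvalidateCaches() whenever one of the instructions changes.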
FusionDecision CanFuseCached(HloInstruction* producer,
HloInstruction* consumer) {
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
auto& producer_cache = can_fuse_cache_[producer];
auto it = producer_cache.find(consumer);
if (it != producer_cache.end()) {
return it->second;
}
}
auto fusion_decision = CanFuse(producer, consumer);
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
can_fuse_cache_[producer][consumer] = fusion_decision;
}
return fusion_decision;
}
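  // A producer only receives a real priority if it can fuse with every one of
  // its non-bitcast users; otherwise the failing decision is returned as the
  // reason it is ineligible.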
FusionDecision CanFuseWithAllNonBitcastUsers(HloInstruction* producer) {
if (producer->users().empty()) {
return "No users to fuse";
}
FusionDecision result;
bool has_non_bitcast_user = false;
for (const auto& user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
continue;
}
has_non_bitcast_user = true;
if (auto fusion_decision = CanFuseCached(producer, user);
!fusion_decision) {
VLOG(10) << "Cannot fuse " << producer->name() << " with "
<< user->name() << ", because: " << fusion_decision.Explain();
return fusion_decision;
}
}
if (!has_non_bitcast_user) {
return "not fusing because there are only bitcast users";
}
return {};
}
HloComputation* computation_;
GpuHloCostAnalysis cost_analysis_;
using PriorityQueue = std::map<std::pair<Priority, int>, HloInstruction*>;
PriorityQueue producer_priority_queue_;
absl::flat_hash_map<HloInstruction*, PriorityQueue::iterator> reverse_map_;
HloInstruction* current_producer_;
std::vector<HloInstruction*> current_consumers_;
absl::flat_hash_set<HloInstruction*> to_update_priority_;
FusionProcessDumpProto* fusion_process_dump_;
absl::Mutex fusion_process_dump_mutex_;
tsl::thread::ThreadPool* thread_pool_;
mlir::MLIRContext* mlir_context_;
HloFusionAnalysisCache& fusion_analysis_cache_;
absl::flat_hash_map<
const HloInstruction*,
absl::flat_hash_map<const HloInstruction*, FusionDecision>>
can_fuse_cache_;
absl::Mutex can_fuse_cache_mutex_;
GpuPerformanceModelCache gpu_performance_model_cache_;
bool triton_softmax_priority_fusion_enabled_;
bool dump_fusion_visualization_;
};
}  // namespace
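// Divide, sqrt, rsqrt, and exp are cheap enough on GPU in f32/f16 that
// duplicating them during fusion is acceptable.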
bool GpuPriorityFusion::IsExpensive(
const HloInstruction& instruction) {
switch (instruction.opcode()) {
case HloOpcode::kDivide:
case HloOpcode::kSqrt:
case HloOpcode::kRsqrt:
case HloOpcode::kExp:
if (ElementIsF32OrF16(instruction.shape())) {
return false;
}
break;
case HloOpcode::kFusion:
return false;
default:
break;
}
return InstructionFusion::IsExpensive(instruction);
}
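// A constant with at most one element, which is cheap to duplicate into
// fusions.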
bool IsSmallConstant(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kConstant && instr->shape().IsArray() &&
ShapeUtil::ElementsIn(instr->shape()) <= 1;
}
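// Consults the pass's fusion fuel, a debugging mechanism that bounds how many
// fusions the pass may perform.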
bool GpuPriorityFusion::ConsumeFuel(HloInstruction* producer,
HloInstruction* consumer) {
return xla::ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing producer %s with consumer %s",
producer->name(), consumer->name());
});
}
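// Runs priority fusion over every fusible computation in the module.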
absl::StatusOr<bool> GpuPriorityFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool dump_enabled =
DumpingEnabledForHloPass(name(), module->config().debug_options());
if (dump_enabled) {
fusion_process_dump_ = std::make_unique<FusionProcessDumpProto>();
*fusion_process_dump_->mutable_gpu_device_info() =
device_info_.ToGpuProto();
}
auto fusible_computations =
GetFusibleComputations(*module, execution_threads);
for (auto* computation : fusible_computations) {
for (auto* instruction : computation->instructions()) {
module->SetAndUniquifyInstrName(instruction,
absl::StrCat(instruction->name(), ".0"));
}
}
if (dump_enabled) {
fusion_process_dump_->set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
}
bool triton_softmax_priority_fusion_enabled =
module->config()
.debug_options()
.xla_gpu_enable_triton_softmax_priority_fusion();
  bool changed = false;
for (auto* computation : fusible_computations) {
CHECK(!computation->IsFusionComputation());
auto fusion_queue = std::make_unique<GpuPriorityFusionQueue>(
computation, cost_analysis_options_, &device_info_,
fusion_process_dump_.get(), thread_pool_, &mlir_context_,
fusion_analysis_cache_, triton_softmax_priority_fusion_enabled);
while (fusion_queue->DequeueNextProducer()) {
auto producer = fusion_queue->current_producer();
for (auto* consumer : fusion_queue->current_consumers()) {
if (consumer->opcode() == HloOpcode::kBitcast) {
continue;
}
if (!ConsumeFuel(producer, consumer)) continue;
VLOG(5) << "next: " << consumer->name() << "(" << consumer << ") + "
<< producer->name() << "(" << producer << ")";
fusion_queue->PreFusion(producer, consumer);
auto fusion_instruction = Fuse(producer, consumer, computation);
fusion_queue->OnFusingInstruction(fusion_instruction, producer,
consumer);
changed = true;
}
if (producer->user_count() == 0) {
fusion_queue->RemoveInstruction(producer);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(producer));
}
fusion_queue->UpdatePriorities();
}
std::vector<HloInstruction*> constants;
for (auto* instruction : computation->instructions()) {
if (IsSmallConstant(instruction)) {
constants.push_back(instruction);
}
}
for (auto* constant : constants) {
auto users = constant->users();
for (auto* user : users) {
if (IsFusible(*user) && CanEmitInputFusedScatter(*constant, *user)) { | #include "xla/service/gpu/priority_fusion.h"
#include <stdint.h>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
namespace xla {
namespace gpu {
class PriorityFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
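  // Runs priority fusion on `hlo` and returns the emitter fusion kind of each
  // fusion computation remaining in the module.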
std::vector<HloFusionAnalysis::EmitterFusionKind> RunAndGetFusionKinds(
absl::string_view hlo) {
auto module = ParseAndReturnVerifiedModule(hlo).value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->RemoveUnusedComputations(), IsOk());
std::vector<HloFusionAnalysis::EmitterFusionKind> kinds;
for (auto computation : module->computations()) {
if (!computation->FusionInstruction()) continue;
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
Cast<HloFusionInstruction>(computation->FusionInstruction()),
&device_info);
kinds.push_back(analysis.GetEmitterFusionKind());
}
return kinds;
}
  GpuPriorityFusion priority_fusion_{
      /*thread_pool=*/nullptr, TestGpuDeviceInfo::RTXA6000DeviceInfo(),
      GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
                                  /*per_second_rates=*/{},
                                  /*count_multiple_input_accesses=*/true}};
};
TEST_F(PriorityFusionTest, FuseWithSharedArgument) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%subtract = f32[] subtract(%p0, %p1)
%compare = pred[] compare(%subtract, %subtract), direction=NE
%add = f32[] add(%p0, %p1)
%abs = f32[] abs(%subtract)
ROOT %select = f32[] select(%compare, %add, %abs)
})")
.value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop);
}
TEST_F(PriorityFusionTest, FusionFusionWithDuplication) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384]{0} parameter(0)
ROOT m = f32[16384]{0} multiply(p, p)
}
exp {
p = f32[16384]{0} parameter(0)
ROOT e = f32[16384]{0} exponential(p)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
p = f32[16384]{0} parameter(0)
s = f32[16384]{0} fusion(p), kind=kLoop, calls=square
e = f32[16384]{0} fusion(s), kind=kLoop, calls=exp
l = f32[16384]{0} fusion(s), kind=kInput, calls=log
ROOT t = (f32[16384], f32[16384]) tuple(l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[16384]{0} parameter(0)
CHECK-NEXT: %[[FUSION_0:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[FUSION_1:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: ROOT {{.*}} tuple(%[[FUSION_0]], %[[FUSION_1]])
)");
}
TEST_F(PriorityFusionTest, FuseBroadcastIntoBitcastConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
param_0 = f32[96]{0} parameter(0)
broadcast = f32[8,96,128,7]{3,2,1,0} broadcast(param_0), dimensions={1}
bitcast.6079.2 = f32[8,24,4,128,7]{4,3,2,1,0} bitcast(broadcast)
ROOT transpose.1990.2 = f32[8,24,128,7,4]{4,3,2,1,0} transpose(bitcast.6079.2), dimensions={0,1,3,4,2}
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[96]{0} parameter(0)
CHECK-NEXT: ROOT %{{.*}} fusion(%[[PARAM]])
)");
}
TEST_F(PriorityFusionTest, FuseWideningConvertIntoConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
p = f16[512]{0} parameter(0)
a = f16[512]{0} add(p, p)
c = f32[512]{0} convert(a)
s = f32[512]{0} multiply(c, c)
bc = s32[512]{0} bitcast(c)
ROOT t = (f32[512], s32[512]) tuple(s, bc)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f16[512]{0} parameter(0)
CHECK-NEXT: %[[FUSION_F32:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[CONVERT_FUSION:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[BITCAST:.*]] = s32[512]{0} bitcast(%[[CONVERT_FUSION]])
CHECK-NEXT: ROOT %{{.*}} = (f32[512]{0}, s32[512]{0}) tuple(%[[FUSION_F32]], %[[BITCAST]])
)");
}
TEST_F(PriorityFusionTest, FuseConvertIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion
)");
}
TEST_F(PriorityFusionTest, ReductionEpilogueFusionRegressionTest) {
absl::string_view kHlo = R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
ENTRY main {
param_1.15162 = f32[2752]{0} parameter(1)
convert.44829 = bf16[2752]{0} convert(param_1.15162)
bitcast.24686 = bf16[1,1,2752]{2,1,0} bitcast(convert.44829)
convert.44468 = f32[1,1,2752]{2,1,0} convert(bitcast.24686)
constant_13722 = bf16[] constant(1)
convert.17451 = f32[] convert(constant_13722)
broadcast.17565 = f32[1,1,2752]{2,1,0} broadcast(convert.17451), dimensions={}
negate.167 = f32[1,1,2752]{2,1,0} negate(convert.44468)
exponential.569 = f32[1,1,2752]{2,1,0} exponential(negate.167)
add.1850 = f32[1,1,2752]{2,1,0} add(broadcast.17565, exponential.569)
divide.1376 = f32[1,1,2752]{2,1,0} divide(broadcast.17565, add.1850)
multiply.9709 = f32[1,1,2752]{2,1,0} multiply(convert.44468, divide.1376)
param_0.15005 = f32[2752]{0} parameter(0)
convert.44826 = bf16[2752]{0} convert(param_0.15005)
bitcast.24683 = bf16[1,1,2752]{2,1,0} bitcast(convert.44826)
convert.44467 = f32[1,1,2752]{2,1,0} convert(bitcast.24683)
multiply.9708 = f32[1,1,2752]{2,1,0} multiply(multiply.9709, convert.44467)
convert.16959 = bf16[1,1,2752]{2,1,0} convert(multiply.9708)
fusion.3203 = bf16[2752]{0} bitcast(convert.16959)
convert.15093 = f32[2752]{0} convert(fusion.3203)
broadcast.13841 = f32[8192,2752]{1,0} broadcast(convert.15093), dimensions={1}
param_0.15525 = bf16[8192,2752]{1,0} parameter(2)
convert.13738 = f32[8192,2752]{1,0} convert(param_0.15525)
multiply.6422 = f32[8192,2752]{1,0} multiply(broadcast.13841, convert.13738)
constant_14382 = f32[] constant(0)
fusion.339 = f32[8192]{0} reduce(multiply.6422, constant_14382), dimensions={1}, to_apply=add
convert.44633 = bf16[8192]{0} convert(fusion.339)
ROOT bitcast.24487 = bf16[1,1,8192]{2,1,0} bitcast(convert.44633)
}
)";
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(HloFusionAnalysis::EmitterFusionKind::kLoop,
HloFusionAnalysis::EmitterFusionKind::kReduction));
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: ROOT {{.*}} bitcast({{.*}}fusion{{.*}})
)");
}
TEST_F(PriorityFusionTest, DoNotChangeReductionFusionToLoopFusion) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
fused_computation {
p0 = f32[16,64]{1,0} parameter(0)
zero = f32[] constant(0.0)
ROOT reduce = f32[16]{0} reduce(p0, zero), dimensions={1}, to_apply=add
}
ENTRY main {
param0 = f32[16,64]{1,0} parameter(0)
fusion = f32[16]{0} fusion(param0), kind=kLoop, calls=fused_computation
ROOT slice = f32[8]{0} slice(fusion), slice={[0:8]}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseTransposeIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.17323 = pred[2048,2048]{1,0} parameter(0)
broadcast.22829 = pred[1,12,2048,2048]{3,2,1,0} broadcast(param_0.17323), dimensions={2,3}
param_1.19761 = bf16[2048,24576]{1,0} parameter(1)
convert.29880.clone.1 = f32[2048,24576]{1,0} convert(param_1.19761)
constant_10033_clone_1 = bf16[] constant(0.02002)
convert.30056.clone.1 = f32[] convert(constant_10033_clone_1)
broadcast.18898.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30056.clone.1), dimensions={}
multiply.13451.clone.1 = f32[2048,24576]{1,0} multiply(convert.29880.clone.1, broadcast.18898.clone.1)
tanh.798.clone.1 = f32[2048,24576]{1,0} tanh(multiply.13451.clone.1)
constant_10244_clone_1 = bf16[] constant(50)
convert.30039.clone.1 = f32[] convert(constant_10244_clone_1)
broadcast.18310.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30039.clone.1), dimensions={}
multiply.12550.clone.1 = f32[2048,24576]{1,0} multiply(tanh.798.clone.1, broadcast.18310.clone.1)
convert.29370.clone.1 = bf16[2048,24576]{1,0} convert(multiply.12550.clone.1)
bitcast.22330 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6582 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22330), dimensions={0,3,2,1}
convert.33705 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6582)
constant_10212 = f32[] constant(-2.38197633e+38)
broadcast.22828 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10212), dimensions={}
select.589 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22829, convert.33705, broadcast.22828)
bitcast.22075 = f32[12,2048,2048]{2,1,0} bitcast(select.589)
constant_10192 = f32[] constant(-inf)
reduce.1614 = f32[12,2048]{1,0} reduce(bitcast.22075, constant_10192), dimensions={2}, to_apply=add
predarg = pred[1,1,2048,2048]{3,2,1,0} parameter(2)
bitcast.11069 = pred[2048,2048]{1,0} bitcast(predarg)
broadcast.22825 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
bitcast.22331 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6580 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22331), dimensions={0,3,2,1}
convert.33703 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6580)
constant_10213 = f32[] constant(-2.38197633e+38)
broadcast.22824 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10213), dimensions={}
select.587 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22825, convert.33703, broadcast.22824)
broadcast.22819 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1129 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.587, broadcast.22819)
exponential.418 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1129)
bitcast.22074 = f32[12,2048,2048]{2,1,0} bitcast(exponential.418)
constant_10490 = f32[] constant(0)
reduce.1613 = f32[12,2048]{1,0} reduce(bitcast.22074, constant_10490), dimensions={2}, to_apply=add
constant_468 = f32[] constant(-2.38197633e+38)
broadcast.22833 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
bitcast.22332 = bf16[1,2048,2048,12]{3,2,1,0} bitcast(convert.29370.clone.1)
transpose.6584 = bf16[1,12,2048,2048]{3,2,1,0} transpose(bitcast.22332), dimensions={0,3,2,1}
convert.33707 = f32[1,12,2048,2048]{3,2,1,0} convert(transpose.6584)
broadcast.22832 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_468), dimensions={}
select.591 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22833, convert.33707, broadcast.22832)
broadcast.22821 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1131 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.591, broadcast.22821)
exponential.420 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1131)
broadcast.18351 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1613), dimensions={1,2}
divide.340 = f32[1,12,2048,2048]{3,2,1,0} divide(exponential.420, broadcast.18351)
ROOT convert.29418 = bf16[1,12,2048,2048]{3,2,1,0} convert(divide.340)
})";
using Kind = HloFusionAnalysis::EmitterFusionKind;
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(Kind::kLoop, Kind::kLoop, Kind::kLoop,
Kind::kReduction, Kind::kReduction, Kind::kTranspose,
Kind::kTranspose, Kind::kTranspose));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, ConvertFusedIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion(
CHECK-NOT: fusion(
)");
}
TEST_F(PriorityFusionTest, DoNotFuseDynamicUpdateSliceIntoReduce) {
GTEST_SKIP() << "b/294198633";
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.10549 = f32[4,2112]{1,0} parameter(0)
param_5.2561 = pred[] parameter(5)
broadcast.19725 = pred[4,1]{1,0} broadcast(param_5.2561), dimensions={}
param_1.11587 = pred[4]{0} parameter(1)
constant_5837 = f32[] constant(1)
broadcast.19723 = f32[4]{0} broadcast(constant_5837), dimensions={}
param_2.5952 = f32[4,8000]{1,0} parameter(2)
param_3.4004 = f32[4]{0} parameter(3)
broadcast.19718 = f32[4,8000]{1,0} broadcast(param_3.4004), dimensions={0}
subtract.1112 = f32[4,8000]{1,0} subtract(param_2.5952, broadcast.19718)
exponential.418 = f32[4,8000]{1,0} exponential(subtract.1112)
constant_6254 = f32[] constant(0)
reduce.1154 = f32[4]{0} reduce(exponential.418, constant_6254), dimensions={1}, to_apply=add
log.38 = f32[4]{0} log(reduce.1154)
broadcast.19717 = f32[4,8000]{1,0} broadcast(log.38), dimensions={0}
subtract.1111 = f32[4,8000]{1,0} subtract(subtract.1112, broadcast.19717)
iota.170 = s32[4,1]{1,0} iota(), iota_dimension=0
constant_6281 = s32[] constant(0)
broadcast.19735 = s32[4]{0} broadcast(constant_6281), dimensions={}
param_4.3400 = s32[4,8000]{1,0} parameter(4)
slice.3186 = s32[4,40]{1,0} slice(param_4.3400), slice={[0:4], [0:40]}
iota.168 = s32[4,1]{1,0} iota(), iota_dimension=0
param_7.1596 = s32[4]{0} parameter(7)
compare.341 = pred[4]{0} compare(param_7.1596, broadcast.19735), direction=LT
constant_5833 = s32[] constant(40)
broadcast.19731 = s32[4]{0} broadcast(constant_5833), dimensions={}
add.8348 = s32[4]{0} add(param_7.1596, broadcast.19731)
select.418 = s32[4]{0} select(compare.341, add.8348, param_7.1596)
bitcast.20942 = s32[4,1]{1,0} bitcast(select.418)
concatenate.1337 = s32[4,2]{1,0} concatenate(iota.168, bitcast.20942), dimensions={1}
gather.43 = s32[4,1,1]{2,1,0} gather(slice.3186, concatenate.1337), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20941 = s32[4]{0} bitcast(gather.43)
select.398 = s32[4]{0} select(param_1.11587, broadcast.19735, bitcast.20941)
compare.334 = pred[4]{0} compare(select.398, broadcast.19735), direction=LT
constant_6260 = s32[] constant(8000)
broadcast.19720 = s32[4]{0} broadcast(constant_6260), dimensions={}
add.8336 = s32[4]{0} add(select.398, broadcast.19720)
select.396 = s32[4]{0} select(compare.334, add.8336, select.398)
bitcast.20830 = s32[4,1]{1,0} bitcast(select.396)
concatenate.1308 = s32[4,2]{1,0} concatenate(iota.170, bitcast.20830), dimensions={1}
gather.41 = f32[4,1,1]{2,1,0} gather(subtract.1111, concatenate.1308), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20824 = f32[4]{0} bitcast(gather.41)
select.389 = f32[4]{0} select(param_1.11587, broadcast.19723, bitcast.20824)
bitcast.20823 = f32[4,1]{1,0} bitcast(select.389)
param_6.1719 = s32[] parameter(6)
constant_6323 = s32[] constant(2048)
add.8549 = s32[] add(param_6.1719, constant_6323)
compare.388 = pred[] compare(add.8549, constant_6281), direction=LT
constant_5436 = s32[] constant(4160)
add.8339 = s32[] add(param_6.1719, constant_5436)
select.409 = s32[] select(compare.388, add.8339, add.8549)
dynamic-slice.36 = f32[4,1]{1,0} dynamic-slice(param_0.10549, constant_6281, select.409), dynamic_slice_sizes={4,1}
select.388 = f32[4,1]{1,0} select(broadcast.19725, bitcast.20823, dynamic-slice.36)
ROOT dynamic-update-slice.307 = f32[4,2112]{1,0} dynamic-update-slice(param_0.10549, select.388, constant_6281, select.409)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} dynamic-update-slice(
CHECK: %[[REDUCE:.*]] = {{.*}} reduce(
CHECK: ROOT {{.*}} log(%[[REDUCE]])
CHECK: ENTRY
CHECK-COUNT-2: fusion(
)");
}
TEST_F(PriorityFusionTest, DontFuseIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT add = s32[3,3] add(scatter, scatter)
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}
TEST_F(PriorityFusionTest, DontFuseConstantIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
operand = s32[1] constant({0})
indices = s32[24,1] parameter(0)
constant = s32[] constant(1)
updates = s32[24,1] broadcast(constant)
ROOT scatter = s32[1] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter())));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Parameter(),
m::Broadcast(m::Constant()))));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduceEvenIfOccupancyIsHigh) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
p0 = f32[4,3584,128,168]{3,2,1,0} parameter(0)
c = f32[] constant(0)
r1 = f32[4,3584,128]{2,1,0} reduce(p0, c), dimensions={3}, to_apply=add
ROOT r2 = f32[4,3584]{1,0} reduce(r1, c), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, FuseReductionEpilogueWithMultipleUsers) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
fused_computation {
p0 = f32[64,16384]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce.858 = f32[64]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
ENTRY main {
p0 = f32[64,16384]{1,0} parameter(0)
fusion = f32[64]{0} fusion(p0), kind=kInput, calls=fused_computation
log = f32[64]{0} log(fusion)
negate = f32[64]{0} custom-call(log), custom_call_target="negate"
ROOT add = f32[64]{0} add(negate, log)
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: %[[PARAM:.*]] = {{.*}} parameter(0)
CHECK: %[[FUSION:.*]] = {{.*}} fusion(%[[PARAM]])
CHECK: custom-call(%[[FUSION]])
)");
}
TEST_F(PriorityFusionTest, EpilogueFusion) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
}
fused_computation.2 {
p0 = f32[8,4,128]{2,1,0} parameter(0)
r1 = f32[8,4,128]{2,1,0} log(p0)
ROOT r2 = f32[8,4,128]{2,1,0} log(r1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
f1 = f32[8,4,128]{2,1,0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[8,4,128]{2,1,0} fusion(f1), kind=kLoop, calls=%fused_computation.2
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} = f32[8,4,128]{2,1,0} fusion(%p{{.*}}), kind=kInput, calls=%fused_computation)");
}
TEST_F(PriorityFusionTest, EpilogueFusionFails) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[28672,4096]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[28672]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
fused_computation.2 {
p0 = f32[28672]{0} parameter(0)
p1 = f32[28672]{0} parameter(1)
ROOT a = f32[28672]{0} add(p0, p1)
}
ENTRY main {
p0 = f32[28672,4096]{1,0} parameter(0)
p1 = f32[28672]{0} parameter(1)
f = f32[28672]{0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[28672]{0} fusion(f,p1), kind=kLoop, calls=%fused_computation.2
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseIntoRoot) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY %main (p.0: u32[2], p.1: u32[]) -> u32[2] {
%p.0 = u32[2]{0} parameter(0)
%p.1 = u32[] parameter(1)
ROOT %broadcast = u32[2]{0} broadcast(u32[] %p.1), dimensions={}, sharding={replicated}
%add = u32[2]{0} add(u32[2]{0} %p.0, u32[2]{0} %broadcast)
%tuple.1 = (u32[2]{0}) tuple(u32[2]{0} %add)
%token.0 = token[] after-all()
%outfeed.6 = token[] outfeed((u32[2]{0}) %tuple.1, token[] %token.0), outfeed_shape=(u32[2]{0}), sharding={maximal device=0}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DontFuseConcat) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
%maximum (param_0: f32[], param_1: f32[]) -> f32[] {
%param_0 = f32[] parameter(0)
%param_1 = f32[] parameter(1)
ROOT %maximum = f32[] maximum(f32[] %param_0, f32[] %param_1)
}
%fused_concat (param_0: f32[1,4,401,8,8], param_1: f32[1,1,4,1023,8], param_2: bf16[1,4,1023,8,8]) -> f32[1,4,1424,8,8] {
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%convert = f32[1,4,1023,8,8]{4,3,2,1,0} convert(bf16[1,4,1023,8,8]{4,3,2,1,0} %param_2)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1023,8]{2,1,0} bitcast(f32[1,1,4,1023,8]{4,3,2,1,0} %param_1)
%broadcast = f32[1,4,1023,8,8]{4,3,2,1,0} broadcast(f32[4,1023,8]{2,1,0} %bitcast), dimensions={1,2,4}
%add = f32[1,4,1023,8,8]{4,3,2,1,0} add(f32[1,4,1023,8,8]{4,3,2,1,0} %convert, f32[1,4,1023,8,8]{4,3,2,1,0} %broadcast)
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
ROOT %concatenate = f32[1,4,1424,8,8]{4,3,2,1,0} concatenate(f32[1,4,1023,8,8]{4,3,2,1,0} %add, f32[1,4,401,8,8]{4,3,2,1,0} %param_0), dimensions={2}
}
%fused_reduce (param_0: f32[], param_1: f32[1,4,1424,8,8]) -> f32[4,8,8] {
%param_1 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1424,8,8]{3,2,1,0} bitcast(f32[1,4,1424,8,8]{4,3,2,1,0} %param_1)
%param_0 = f32[] parameter(0)
ROOT %reduce = f32[4,8,8]{2,1,0} reduce(f32[4,1424,8,8]{3,2,1,0} %bitcast, f32[] %param_0), dimensions={1}, to_apply=%maximum
}
%fused_broadcast (param_0: f32[1,4,1424,8,8], param_1: f32[4,8,8]) -> f32[1,4,1424,8,8] {
%param_0 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[4,8,8]{2,1,0} parameter(1)
%broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} broadcast(f32[4,8,8]{2,1,0} %param_1), dimensions={1,3,4}
ROOT %subtract = f32[1,4,1424,8,8]{4,3,2,1,0} subtract(f32[1,4,1424,8,8]{4,3,2,1,0} %param_0, f32[1,4,1424,8,8]{4,3,2,1,0} %broadcast)
}
ENTRY fusion {
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%concat = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%param_0, %param_1, %param_2), kind=kLoop, calls=fused_concat
%param_3 = f32[] parameter(3)
%reduce = f32[4,8,8]{2,1,0} fusion(%param_3, %concat), kind=kLoop, calls=fused_reduce
%param_4 = f32[4,8 | 2,039 |
#ifndef XLA_SERVICE_GPU_COMMAND_BUFFER_SCHEDULING_H_
#define XLA_SERVICE_GPU_COMMAND_BUFFER_SCHEDULING_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla::gpu {
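// Lifts maximal runs of command-buffer-compatible instructions (fusions,
// library calls, collectives, ...) out of a scheduled module into dedicated
// `command_buffer` computations invoked via kCall, so the backend can record
// each run as a single command buffer (e.g. a CUDA graph) instead of
// launching every operation individually.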
class CommandBufferScheduling : public HloModulePass {
public:
struct CommandBufferConfig {
absl::flat_hash_set<DebugOptions::CommandBufferCmdType> enabled_commands;
const se::DeviceDescription& device_description;
};
CommandBufferScheduling(const se::DeviceDescription& device_description,
int32_t gpu_toolkit_version,
int32_t gpu_driver_version);
absl::string_view name() const override {
return "command-buffer-scheduling";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
static std::vector<HloInstructionSequence> CollectCommandBufferSequences(
HloInstructionSequence schedule, const CommandBufferConfig& config,
int32_t min_num_commands = 1);
static absl::Status MoveParametersAndConstantsToFront(
HloComputation* computation);
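// Result of extracting a scheduled instruction sequence into a standalone
// computation: `arguments` are the original instructions that become
// parameters, `results` are the instructions whose values are used outside
// the sequence, and `inst_mapping` maps originals to their clones.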
struct CommandBuffer {
std::vector<HloInstruction*> arguments;
std::vector<HloInstruction*> results;
std::unique_ptr<HloComputation> computation;
absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping;
};
static absl::StatusOr<CommandBuffer> PrepareCommandBuffer(
const HloInstructionSequence& seq);
static absl::StatusOr<HloComputation*> RewriteCommandBuffer(
HloComputation* parent, const HloInstructionSequence& seq,
CommandBuffer command_buffer);
private:
se::DeviceDescription device_description_;
int32_t gpu_toolkit_version_;
int32_t gpu_driver_version_;
};
}
#endif
#include "xla/service/gpu/command_buffer_scheduling.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
using CommandBufferConfig = CommandBufferScheduling::CommandBufferConfig;
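// Returns true if every instruction in `computation` can be recorded into a
// command buffer under `config`. Defined below; forward-declared here for the
// control-flow specializations of IsCommand.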
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config);
static bool IsConstant(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kConstant;
}
static bool IsParameter(const HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kParameter;
}
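// Bitcasts, tuples and get-tuple-elements emit no device work, so they can be
// carried along inside a command buffer sequence for free.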
static bool IsNoOp(const HloInstruction* hlo) {
return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple,
HloOpcode::kGetTupleElement>(hlo);
}
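// Per-opcode predicate: true if `hlo` can be recorded into a command buffer.
// Specialized below for the control-flow opcodes.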
template <HloOpcode op>
static bool IsCommand(const HloInstruction*, const CommandBufferConfig&);
template <>
bool IsCommand<HloOpcode::kWhile>(const HloInstruction* hlo,
const CommandBufferConfig& config) {
return config.enabled_commands.contains(DebugOptions::CONDITIONALS) &&
IsCommand(hlo->while_body(), config) &&
IsCommand(hlo->while_condition(), config);
}
template <>
bool IsCommand<HloOpcode::kConditional>(const HloInstruction* hlo,
const CommandBufferConfig& config) {
return config.enabled_commands.contains(DebugOptions::CONDITIONALS) &&
absl::c_all_of(hlo->branch_computations(),
[&](const HloComputation* comp) {
return IsCommand(comp, config);
});
}
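// A custom call is a command if it is a cuBLAS/cuBLASLt matmul (with the
// corresponding command type enabled), a Triton kernel call outside of
// autotuning, or an FFI handler registered as command-buffer compatible.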
static bool IsCommand(const HloCustomCallInstruction* hlo,
const CommandBufferConfig& config) {
if (config.enabled_commands.contains(DebugOptions::CUBLAS) &&
IsLegacyCublasMatmul(*hlo)) {
return true;
}
if (config.enabled_commands.contains(DebugOptions::CUBLASLT) &&
(IsCublasLtMatmul(*hlo) || IsCublasLtMatmulF8(*hlo))) {
return true;
}
if (!config.enabled_commands.contains(DebugOptions::CUSTOM_CALL)) {
return false;
}
if (hlo->custom_call_target() == "triton_kernel_call" &&
!absl::StrContains(hlo->metadata().op_name(), "Autotuner")) {
return true;
}
auto registration = ffi::FindHandler(hlo->custom_call_target(), "gpu");
return registration.ok()
? ffi::IsCommandBufferCompatible(registration->traits)
: false;
}
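// Top-level per-instruction predicate: handles fusions (including cuDNN and
// address-computation custom fusions), sorts, partition/replica ids, custom
// calls, and while/conditional control flow.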
static bool IsCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (auto* fusion = DynCast<HloFusionInstruction>(hlo)) {
auto gpu_config = fusion->backend_config<GpuBackendConfig>();
const FusionBackendConfig& backend_config =
gpu_config->fusion_backend_config();
if (backend_config.kind() == kCuDnnFusionKind) {
return config.enabled_commands.contains(DebugOptions::CUDNN);
}
const auto& custom_config = backend_config.custom_fusion_config();
if (custom_config.name() == "address_computation") {
auto fusion_analysis =
HloFusionAnalysis::Create(fusion, &config.device_description);
const HloFusionAdaptor& adaptor = fusion_analysis.fusion();
auto custom_call_adaptor = HloFindIf(
adaptor.GetRoots(), adaptor,
[](auto node) { return node.opcode() == HloOpcode::kCustomCall; });
const auto* custom_call = static_cast<const HloCustomCallInstruction*>(
&custom_call_adaptor->instruction());
return IsCommand(custom_call, config);
}
if (custom_config.name() == "dynamic_address_computation") {
return false;
}
return config.enabled_commands.contains(DebugOptions::FUSION);
}
if (auto* sort = DynCast<HloSortInstruction>(hlo))
return config.enabled_commands.contains(DebugOptions::FUSION);
if (hlo->opcode() == HloOpcode::kPartitionId ||
hlo->opcode() == HloOpcode::kReplicaId) {
return config.enabled_commands.contains(DebugOptions::FUSION);
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(hlo))
return IsCommand(custom_call, config);
if (hlo->opcode() == HloOpcode::kWhile)
return IsCommand<HloOpcode::kWhile>(hlo, config);
if (hlo->opcode() == HloOpcode::kConditional)
return IsCommand<HloOpcode::kConditional>(hlo, config);
return false;
}
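// Collective start ops are only recorded when COLLECTIVES command buffers are
// enabled; reduce-scatter is matched through its async-start wrapper.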
static bool IsAsyncStartCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceStart ||
hlo->opcode() == HloOpcode::kAllGatherStart) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncStart) {
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
return false;
}
static bool IsAsyncDoneCommand(const HloInstruction* hlo,
const CommandBufferConfig& config) {
if (hlo->opcode() == HloOpcode::kAllReduceDone ||
hlo->opcode() == HloOpcode::kAllGatherDone) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
if (hlo->opcode() == HloOpcode::kAsyncDone) {
if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter) {
return config.enabled_commands.contains(DebugOptions::COLLECTIVES);
}
}
return false;
}
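// Returns the done op paired with an async start, or nullptr if `start` is
// not a recognized async start instruction.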
static HloInstruction* FindAsyncDoneCommand(const HloInstruction* start) {
if (start->opcode() == HloOpcode::kAllReduceStart ||
start->opcode() == HloOpcode::kAllGatherStart) {
CHECK(start->users().size() == 1);
return start->users().front();
} else if (start->opcode() == HloOpcode::kAsyncStart) {
return start->async_chain_done();
}
return nullptr;
}
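// A computation is command-buffer compatible iff all of its instructions are
// no-ops, constants, parameters, commands, or async start/done commands.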
static bool IsCommand(const HloComputation* computation,
const CommandBufferConfig& config) {
return absl::c_all_of(
computation->instructions(), [&](const HloInstruction* inst) {
return IsNoOp(inst) || IsConstant(inst) || IsParameter(inst) ||
IsCommand(inst, config) || IsAsyncStartCommand(inst, config) ||
IsAsyncDoneCommand(inst, config);
});
}
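// Trailing no-ops contribute nothing to a command buffer, so trim them from
// the end of a collected sequence.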
static void RemoveTrailingNoOps(HloInstructionSequence& seq) {
std::vector<HloInstruction*> instructions = seq.instructions();
for (int i = instructions.size() - 1; i >= 0; i--) {
if (HloInstruction* inst = instructions[i]; IsNoOp(inst)) {
seq.remove_instruction(inst);
} else {
break;
}
}
}
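// Greedily accumulates maximal runs of command-buffer-compatible instructions
// from the schedule. Async regions are swallowed whole (and rejected if they
// contain a done whose matching start lies outside the region); runs with
// fewer than `min_num_commands` real commands are discarded.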
std::vector<HloInstructionSequence>
CommandBufferScheduling::CollectCommandBufferSequences(
const HloInstructionSequence schedule, const CommandBufferConfig& config,
int32_t min_num_commands) {
std::vector<HloInstructionSequence> sequences;
HloInstructionSequence current_seq;
int64_t num_commands_in_current_seq = 0;
auto collect_current_seq = [&]() {
if (num_commands_in_current_seq >= std::max(1, min_num_commands)) {
RemoveTrailingNoOps(current_seq);
sequences.push_back(std::move(current_seq));
}
current_seq = HloInstructionSequence();
num_commands_in_current_seq = 0;
};
auto& instructions = schedule.instructions();
auto collect_async_region = [&](const HloInstruction* start) {
auto get_index = [&](const HloInstruction* inst) -> size_t {
auto it = std::find(instructions.begin(), instructions.end(), inst);
return std::distance(instructions.begin(), it);
};
HloInstructionSequence seq;
size_t done_index = get_index(FindAsyncDoneCommand(start));
for (size_t i = get_index(start); i <= done_index; i++) {
HloInstruction* inst = instructions.at(i);
if (IsAsyncStartCommand(inst, config)) {
const HloInstruction* done = FindAsyncDoneCommand(inst);
done_index = std::max(done_index, get_index(done));
}
seq.push_back(inst);
}
return seq;
};
auto check_async_region = [&](const HloInstructionSequence& seq) {
if (!absl::c_all_of(seq.instructions(), [&](HloInstruction* inst) {
return IsNoOp(inst) || IsCommand(inst, config) ||
IsAsyncStartCommand(inst, config) ||
IsAsyncDoneCommand(inst, config);
})) {
return false;
}
absl::flat_hash_set<HloInstruction*> done_instructions;
for (const HloInstruction* inst : seq.instructions()) {
if (IsAsyncStartCommand(inst, config)) {
done_instructions.insert(FindAsyncDoneCommand(inst));
}
if (IsAsyncDoneCommand(inst, config)) {
if (!done_instructions.contains(inst)) {
return false;
}
}
}
return true;
};
for (size_t i = 0; i < instructions.size(); i++) {
HloInstruction* inst = instructions.at(i);
if (IsNoOp(inst) && num_commands_in_current_seq) {
current_seq.push_back(inst);
continue;
}
if (IsCommand(inst, config)) {
num_commands_in_current_seq++;
current_seq.push_back(inst);
continue;
}
if (IsAsyncStartCommand(inst, config)) {
HloInstructionSequence seq = collect_async_region(inst);
if (check_async_region(seq)) {
num_commands_in_current_seq += seq.instructions().size();
for (HloInstruction* inst : seq.instructions()) {
current_seq.push_back(inst);
}
i += seq.instructions().size() - 1;
continue;
}
}
collect_current_seq();
}
collect_current_seq();
return sequences;
}
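// Hoists parameters and constants to the front of the computation's schedule
// so a command buffer never captures an operand scheduled after it; control
// dependencies on the hoisted instructions are forwarded to their users to
// keep the reordering legal.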
absl::Status CommandBufferScheduling::MoveParametersAndConstantsToFront(
HloComputation* computation) {
HloInstructionSequence new_sequence;
HloSchedule& schedule = computation->parent()->schedule();
HloInstructionSequence& sequence = schedule.GetOrCreateSequence(computation);
for (HloInstruction* inst : sequence.instructions()) {
if (IsParameter(inst) || IsConstant(inst)) {
new_sequence.push_back(inst);
for (HloInstruction* control_predecessor : inst->control_predecessors()) {
for (HloInstruction* user : inst->users()) {
TF_RETURN_IF_ERROR(control_predecessor->AddControlDependencyTo(user));
}
}
TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
}
}
for (HloInstruction* inst : sequence.instructions()) {
if (!IsParameter(inst) && !IsConstant(inst)) {
new_sequence.push_back(inst);
}
}
schedule.set_sequence(computation, new_sequence);
return absl::OkStatus();
}
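// Clones `seq` into a fresh "command_buffer" computation: operands defined
// outside the sequence become parameters, and every value used outside the
// sequence (or the computation root) becomes a result, tupled together when
// there is more than one.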
absl::StatusOr<CommandBuffer> CommandBufferScheduling::PrepareCommandBuffer(
const HloInstructionSequence& seq) {
auto builder = HloComputation::Builder("command_buffer");
absl::Span<HloInstruction* const> instructions =
absl::MakeSpan(seq.instructions());
absl::flat_hash_set<HloInstruction*> in_command_buffer(instructions.begin(),
instructions.end());
absl::flat_hash_map<HloInstruction*, HloParameterInstruction*> parameters;
absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping;
auto mapped_operands = [&](HloInstruction* instr) {
absl::InlinedVector<HloInstruction*, 4> operands;
for (HloInstruction* operand : instr->operands()) {
if (auto it = inst_mapping.find(operand); it != inst_mapping.end())
operands.push_back(it->second);
}
return operands;
};
for (HloInstruction* inst : instructions) {
for (HloInstruction* operand : inst->operands()) {
if (parameters.contains(operand)) continue;
if (in_command_buffer.contains(operand)) continue;
int64_t parameter_id = parameters.size();
auto* parameter = Cast<HloParameterInstruction>(builder.AddInstruction(
HloInstruction::CreateParameter(parameter_id, operand->shape(),
absl::StrCat("p", parameter_id))));
inst_mapping[operand] = parameters[operand] = parameter;
}
}
for (HloInstruction* inst : seq.instructions()) {
HloCloneContext ctx(inst->GetModule());
for (HloComputation* called_computation : inst->called_computations()) {
if (called_computation->IsAsyncComputation()) {
called_computation->RemoveAsyncStart();
}
ctx.MapComputation(called_computation, called_computation);
}
inst_mapping[inst] = builder.AddInstruction(
inst->CloneWithNewOperands(inst->shape(), mapped_operands(inst), &ctx));
}
std::vector<HloInstruction*> arguments(parameters.size());
for (auto& [argument, parameter] : parameters) {
arguments[parameter->parameter_number()] = argument;
}
std::vector<HloInstruction*> results;
std::vector<HloInstruction*> returned;
auto has_external_users = [&](HloInstruction* inst) {
return inst->IsRoot() || absl::c_any_of(inst->users(), [&](auto* user) {
return !in_command_buffer.contains(user);
});
};
for (HloInstruction* inst : instructions) {
if (has_external_users(inst)) {
results.push_back(inst);
returned.push_back(inst_mapping[inst]);
}
}
if (returned.size() > 1) {
builder.AddInstruction(HloInstruction::CreateTuple(returned));
}
return CommandBuffer{std::move(arguments), std::move(results),
builder.Build(), std::move(inst_mapping)};
}
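// Replaces `seq` in `parent` with a call to the extracted computation: wires
// results through get-tuple-elements, rebuilds the schedule for the new
// computation, forwards control dependencies across the call boundary, and
// finally removes the original instructions.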
absl::StatusOr<HloComputation*> CommandBufferScheduling::RewriteCommandBuffer(
HloComputation* parent, const HloInstructionSequence& seq,
CommandBuffer command_buffer) {
if (command_buffer.results.empty())
return absl::InternalError("command buffer results must not be empty");
Shape cmd_buffer_result_shape;
bool has_single_result = command_buffer.results.size() == 1;
if (has_single_result) {
cmd_buffer_result_shape = command_buffer.results[0]->shape();
} else {
absl::InlinedVector<Shape, 4> shapes;
shapes.reserve(command_buffer.results.size());
for (auto* res : command_buffer.results) shapes.push_back(res->shape());
cmd_buffer_result_shape = ShapeUtil::MakeTupleShape(shapes);
}
HloComputation* computation =
parent->parent()->AddComputationAndUnifyNamesAndIds(
std::move(command_buffer.computation),
false);
HloInstruction* call = parent->AddInstruction(HloInstruction::CreateCall(
cmd_buffer_result_shape, command_buffer.arguments, computation));
if (has_single_result) {
TF_RETURN_IF_ERROR(command_buffer.results[0]->ReplaceAllUsesWith(call));
} else {
for (int i = 0; i < command_buffer.results.size(); i++) {
TF_RETURN_IF_ERROR(
command_buffer.results[i]->ReplaceAllUsesWith(parent->AddInstruction(
HloInstruction::CreateGetTupleElement(call, i))));
}
}
HloSchedule& schedule = parent->parent()->schedule();
HloInstructionSequence& sequence = schedule.GetOrCreateSequence(parent);
sequence.replace_instruction(seq.instructions().back(), call);
HloInstructionSequence cmd_buffer_schedule;
for (auto* argument : command_buffer.arguments) {
cmd_buffer_schedule.push_back(command_buffer.inst_mapping[argument]);
}
for (auto* inst : seq.instructions()) {
cmd_buffer_schedule.push_back(command_buffer.inst_mapping[inst]);
}
if (!has_single_result) {
cmd_buffer_schedule.push_back(computation->root_instruction());
}
schedule.set_sequence(computation, cmd_buffer_schedule);
auto& inst_mapping = command_buffer.inst_mapping;
for (HloInstruction* inst : seq.instructions()) {
HloInstruction* cmd_inst = inst_mapping[inst];
for (HloInstruction* predecessor : inst->control_predecessors()) {
if (auto it = inst_mapping.find(predecessor); it != inst_mapping.end()) {
HloInstruction* cmd_predecessor = it->second;
if (IsParameter(cmd_predecessor)) {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
} else {
TF_RETURN_IF_ERROR(cmd_predecessor->AddControlDependencyTo(cmd_inst));
}
} else {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call));
}
}
for (HloInstruction* successor : inst->control_successors()) {
if (auto it = inst_mapping.find(successor); it != inst_mapping.end()) {
HloInstruction* cmd_successor = it->second;
TF_RETURN_IF_ERROR(cmd_inst->AddControlDependencyTo(cmd_successor));
} else {
TF_RETURN_IF_ERROR(call->AddControlDependencyTo(successor));
}
}
TF_RETURN_IF_ERROR(inst->DropAllControlDeps());
}
for (int32_t i = seq.instructions().size() - 1; i >= 0; i--) {
TF_RETURN_IF_ERROR(parent->RemoveInstruction(seq.instructions()[i]));
}
return computation;
}
CommandBufferScheduling::CommandBufferScheduling(
const se::DeviceDescription& device_description,
int32_t gpu_toolkit_version, int32_t gpu_driver_version)
: device_description_(device_description),
gpu_toolkit_versi | #include "xla/service/gpu/command_buffer_scheduling.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_parser.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class CommandBufferSchedulingTest : public HloTestBase {
public:
static constexpr int32_t kCudaVersion = 12030;
const se::DeviceDescription& device_desc() {
return backend().default_stream_executor()->GetDeviceDescription();
}
DebugOptions GetDebugOptionsForTest() override {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CONDITIONALS);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CUDNN);
debug_options.set_xla_gpu_graph_min_graph_size(2);
return debug_options;
}
};
using CommandBuffer = CommandBufferScheduling::CommandBuffer;
TEST_F(CommandBufferSchedulingTest, SingleCommandBuffer) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1
ROOT %custom-call = s32[] custom-call(s32[] %fusion, s32[] %fusion.1), custom_call_target="some target"
})";
const char* expected = R"(
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, MultipleCommandBuffers) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%c = (s32[], s32[]) parameter(2)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
%fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
%e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
%custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
%fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
ROOT %custom-call.1 = s32[] custom-call(s32[] %fusion.3), custom_call_target="some target"
})";
const char* expected = R"(
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllGatherStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
ENTRY %main (a: s32[2]) -> s32[4] {
%a = s32[2] parameter(0)
%start = (s32[2]{0}, s32[4]{0}) all-gather-start(%a),
channel_id=555, replica_groups={{0,1}}, dimensions={0},
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[4]{0} all-gather-done(%start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[2]) -> s32[4] {
CHECK: %[[P0]] = s32[2]{0} parameter(0)
CHECK: %[[START:.+]] = {{.*}} all-gather-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-gather-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[2]) -> s32[4] {
CHECK: %[[A:.+]] = s32[2]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, ReduceScatterStartFollowedByDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[4]) -> s32[2] {
%a = s32[4] parameter(0)
%start = ((s32[4]{0}), s32[2]{0}) reduce-scatter-start(%a),
channel_id=555, replica_groups={{0,1}}, dimensions={0}, to_apply=add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
ROOT %done = s32[2]{0} reduce-scatter-done(%start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[2] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = {{.*}} reduce-scatter-start(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[2]{0} reduce-scatter-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[2] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[2]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByBitcast) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%bitcast = s32[4] bitcast(s32[4]{0} %a)
ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[BITCAST:.+]] = s32[4]{0} bitcast(%[[P0]])
CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedAllReduceStart) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4]) -> s32[4] {
%a = s32[4] parameter(0)
%start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
ROOT %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] {
CHECK: %[[P0]] = s32[4]{0} parameter(0)
CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[P0]])
CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
CHECK: ROOT %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
CHECK: }
CHECK: ENTRY %main (a: s32[4]) -> s32[4] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]),
CHECK: to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, DoNotCaptureUnmatchedAsyncDone) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%add (p0: s32[4], p1: s32[4]) -> s32[4] {
%p0 = s32[4] parameter(0)
%p1 = s32[4] parameter(1)
ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1)
}
ENTRY %main (a: s32[4], b:s32[]) -> s32[] {
%a = s32[4] parameter(0)
%b = s32[] parameter(1)
%start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%c = s32[] custom-call(), custom_call_target="target"
%start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a),
replica_groups={{0,1}}, to_apply=%add,
backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}}
%done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1)
%done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2)
%fusion = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation
ROOT %fusion.1 = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
CHECK: %[[P0]] = s32[] parameter(0)
CHECK: %[[P1]] = s32[] parameter(1)
CHECK: %fusion.2 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation
CHECK: ROOT %fusion.3 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation.1
CHECK: }
CHECK: ENTRY %main (a: s32[4], b: s32[]) -> s32[] {
CHECK: %[[A:.+]] = s32[4]{0} parameter(0)
CHECK: %[[B:.+]] = s32[] parameter(1)
CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[A]])
CHECK: %[[C:.+]] = s32[] custom-call()
CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[A]])
CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]])
CHECK: %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]])
CHECK: %call = s32[] call(%b, %c), to_apply=%command_buffer
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, CollectCommandBufferSequence) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%c = (s32[], s32[]) parameter(2)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %c), index=0
%fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1
%e = s32[] get-tuple-element((s32[], s32[]) %c), index=1
%custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2
ROOT %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
HloInstructionSequence seq;
for (HloInstruction* x : module->entry_computation()->instructions()) {
seq.push_back(x);
}
EXPECT_EQ(seq.size(), 10);
CommandBufferScheduling::CommandBufferConfig config{{DebugOptions::FUSION},
device_desc()};
std::vector<HloInstructionSequence> command_buffer_sequences =
CommandBufferScheduling::CollectCommandBufferSequences(seq, config);
EXPECT_EQ(command_buffer_sequences.size(), 2);
std::vector<HloInstruction*> seq_0 =
command_buffer_sequences[0].instructions();
EXPECT_EQ(seq_0.size(), 3);
EXPECT_EQ(seq_0[0]->opcode(), HloOpcode::kFusion);
EXPECT_EQ(seq_0[1]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(seq_0[2]->opcode(), HloOpcode::kFusion);
std::vector<HloInstruction*> seq_1 =
command_buffer_sequences[1].instructions();
EXPECT_EQ(seq_1.size(), 2);
EXPECT_EQ(seq_1[0]->opcode(), HloOpcode::kFusion);
EXPECT_EQ(seq_1[1]->opcode(), HloOpcode::kFusion);
}
TEST_F(CommandBufferSchedulingTest, MoveParametersToFront) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[], c: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%c = s32[] parameter(2)
ROOT %fusion.1 = s32[] fusion(s32[] %a, s32[] %c), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK(CommandBufferScheduling::MoveParametersAndConstantsToFront(
module->entry_computation()));
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
expected));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(CommandBufferSchedulingTest, PrepareCommandBuffer) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation(param_0: s32[], param_1: s32[]) -> (s32[], s32[]) {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %tuple = (s32[], s32[]) tuple(s32[] %p0, s32[] %p1)
}
%fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] custom-call(), custom_call_target="target"
%fusion = (s32[], s32[]) fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation
%d = s32[] get-tuple-element((s32[], s32[]) %fusion), index=0
%fusion.1 = s32[] fusion(s32[] %a, s32[] %d), kind=kLoop, calls=%fused_computation.1
ROOT %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %d), custom_call_target="some target"
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(module->entry_computation()->instruction_count(), 6);
std::vector<HloInstruction*> instructions;
HloInstructionSequence seq;
for (HloInstruction* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kFusion ||
inst->opcode() == HloOpcode::kGetTupleElement) {
seq.push_back(inst);
}
instructions.push_back(inst);
}
TF_ASSERT_OK_AND_ASSIGN(CommandBuffer command_buffer,
CommandBufferScheduling::PrepareCommandBuffer(seq));
HloComputation* computation = module->AddComputationAndUnifyNamesAndIds(
std::move(command_buffer.computation), false);
const char* expected = R"(
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(computation->ToString(
HloPrintOptions{}.set_print_operand_shape(false)),
expected));
EXPECT_TRUE(filecheck_matches);
auto& arguments = command_buffer.arguments;
ASSERT_EQ(arguments.size(), 2);
EXPECT_EQ(arguments[0], instructions[0]);
EXPECT_EQ(arguments[1], instructions[1]);
auto& results = command_buffer.results;
ASSERT_EQ(results.size(), 2);
EXPECT_EQ(results[0], instructions[3]);
EXPECT_EQ(results[1], instructions[4]);
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependencies) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.2 (param_0: s32[], param_1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation, control-predecessors={%custom-call}
%fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1, control-predecessors={%fusion}
%custom-call.1 = s32[] custom-call(), custom_call_target="some target"
%fusion.2 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%fusion.1}
ROOT %custom-call.2 = s32[] custom-call(s32[] %fusion.1, s32[] %fusion.2), custom_call_target="some target"
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] {
CHECK: %[[P0]] = s32[] parameter(0)
CHECK: %[[P1]] = s32[] parameter(1)
CHECK: %[[F0:.+]] = s32[] fusion(%[[P0]], %[[P1]])
CHECK: ROOT {{.*}} = s32[] fusion(%[[P0]], %[[P1]]), {{.*}} control-predecessors={%[[F0]]}
CHECK: }
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %custom-call = s32[] custom-call(), custom_call_target="some target"
CHECK: %call = s32[] call(%a, %b), to_apply=%command_buffer, control-predecessors={%custom-call}
CHECK: %custom-call.1 = s32[] custom-call(), custom_call_target="some target"
CHECK: %[[F3:.+]] = s32[] fusion(%a, %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%call}
CHECK: ROOT %custom-call.2 = s32[] custom-call(%call, %[[F3]]), custom_call_target="some target"
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, ForwardControlDependenciesToParams) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation.0 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
%fused_computation.1 (p0: s32[], p1: s32[]) -> s32[] {
%p0 = s32[] parameter(0)
%p1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %p0, s32[] %p1)
}
ENTRY %main (a: s32[], b: s32[]) -> s32[] {
%a = s32[] parameter(0)
%b = s32[] parameter(1)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%fusion = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.0, control-predecessors={%custom-call}
ROOT %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %b), kind=kLoop, calls=%fused_computation.1
})";
const char* expected = R"(
CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] {
CHECK: %a = s32[] parameter(0)
CHECK: %b = s32[] parameter(1)
CHECK: %[[CUSTOM_CALL:.+]] = s32[] custom-call(), custom_call_target="some target"
CHECK: ROOT {{.*}} call(%[[CUSTOM_CALL]], %a, %b), to_apply=%command_buffer, control-predecessors={%[[CUSTOM_CALL]]}
CHECK: })";
RunAndFilecheckHloRewrite(
hlo, CommandBufferScheduling(device_desc(), kCudaVersion, kCudaVersion),
expected, [](HloModule* module) {
EXPECT_TRUE(module->has_schedule());
TF_CHECK_OK(module->schedule().Verify());
});
}
TEST_F(CommandBufferSchedulingTest, WhileNotCommand) {
const char* hlo = R"(
HloModule TestModule, is_scheduled=true
%fused_computation (param_0: f32[1]) -> f32[1] {
%param_0 = f32[1]{0} parameter(0)
ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0)
}
%fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] {
%param_0.2 = f32[1]{0} parameter(0)
%param_1.1 = f32[1]{0} parameter(1)
ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT
}
%fused_computation.3 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] {
%param_0.1 = f32[1]{0} parameter(0)
%param_1 = f32[1]{0} parameter(1)
ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1)
}
%body (Arg_.3: f32[1]) -> f32[1] {
%constant_4 = f32[1]{0} constant({1})
%Arg_.3 = f32[1]{0} parameter(0)
%custom-call = s32[] custom-call(), custom_call_target="some target"
%add = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1, control-predecessors={%custom-call}
ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %add, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.3, control-predecessors={%custom-call}
}
%cond (Arg_.11: f32[1]) -> pred[] {
%constant = f32[1]{0} constant({100})
%Arg_.11 = f32[1]{0} parameter(0)
%wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2
ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2)
}
ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] {
%Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated}
%wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation
%while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body
ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16)
})";
const char* expected = R"(
CHECK: %command_buffer ([[P0:.+]]: f32[1], [[P1:.+]]: f32[1]) -> f32[1] {
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHECK: %[[P1]] = f32[1]{0} parameter(1)
CHECK: %[[ADD:.*]] = f32[1]{0} fusion(%[[P0]], %[[P1]]), kind=kLoop
CHECK: ROOT {{.*}} = f32[1]{0} fusion(%[[ADD]], %[[P1]]), kind=kLoop
CHECK: }
CHECK: %[[BODY:[a-z_0-9.]+]] ([[P0:.+]]: f32[1]) -> f32[1] {
CHECK: %[[C1:.*]] = f32[1]{0} constant({1})
CHECK: %[[P0]] = f32[1]{0} parameter(0)
CHE | 2,040 |
#ifndef XLA_SERVICE_GPU_CUDNN_PAD_FOR_CONVOLUTIONS_H_
#define XLA_SERVICE_GPU_CUDNN_PAD_FOR_CONVOLUTIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
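// Pads the channel dimensions of cuDNN convolution custom-calls so the
// resulting shapes are eligible for faster kernels (tensor cores for f16,
// vectorized integer convolutions for s8), slicing any extra output elements
// back off afterwards.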
class CudnnPadForConvolutions : public HloModulePass {
public:
explicit CudnnPadForConvolutions(se::CudaComputeCapability compute_capability)
: compute_capability_(compute_capability) {}
absl::string_view name() const override {
return "cudnn_pad_for_convolutions";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
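// Zero-pads `instr` up to `new_shape` (high padding only). Returns `instr`
// unchanged if no dimension actually grows.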
static HloInstruction* PadInstruction(HloInstruction* instr,
const Shape& new_shape) {
HloComputation* comp = instr->parent();
const Shape& shape = instr->shape();
PaddingConfig pad_config = MakeNoPaddingConfig(shape.rank());
bool added_padding = false;
for (int64_t dim = 0; dim < shape.rank(); ++dim) {
if (shape.dimensions(dim) == new_shape.dimensions(dim)) {
continue;
}
CHECK_GT(new_shape.dimensions(dim), shape.dimensions(dim));
pad_config.mutable_dimensions(dim)->set_edge_padding_high(
new_shape.dimensions(dim) - shape.dimensions(dim));
added_padding = true;
}
if (!added_padding) {
return instr;
}
auto* zero = comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(shape.element_type())));
return comp->AddInstruction(
HloInstruction::CreatePad(new_shape, instr, zero, pad_config),
&instr->metadata());
}
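// Pads the operands of `conv` to `new_input_shapes`, re-creates the
// convolution with `new_result_shape`, and slices the result back down to the
// original shape if it grew. Must run while the conv still reports 0 scratch
// bytes, i.e. before CudnnConvAlgorithmPicker.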
static absl::Status PadConv(HloCustomCallInstruction* conv,
absl::Span<const Shape> new_input_shapes,
const Shape& new_result_shape) {
CHECK_EQ(0, conv->shape().tuple_shapes(1).dimensions(0))
<< "conv must use 0 scratch bytes, i.e. this pass must be run "
"before CudnnConvAlgorithmPicker.";
std::vector<HloInstruction*> new_operands;
new_operands.reserve(conv->operand_count());
for (int i = 0; i < conv->operand_count(); ++i) {
new_operands.push_back(
PadInstruction(conv->mutable_operand(i), new_input_shapes[i]));
}
const Shape& result_shape = conv->shape().tuple_shapes(0);
bool changed = false;
for (int i = 0; i < conv->operand_count(); ++i) {
changed |= (new_operands[i] != conv->mutable_operand(i));
}
CHECK(changed) << "We should have had to pad at least one input operand.";
auto add = [&](std::unique_ptr<HloInstruction> new_instr) {
return conv->parent()->AddInstruction(std::move(new_instr));
};
Shape new_conv_shape = ShapeUtil::MakeTupleShape(
{new_result_shape, ShapeUtil::MakeShape(U8, {0})});
auto* new_conv =
add(conv->CloneWithNewOperands(new_conv_shape, new_operands));
new_conv->SetAndSanitizeName(conv->name());
VLOG(2) << "Padded features of " << conv->ToString() << ", replaced with "
<< new_conv->ToString();
if (!ShapeUtil::Equal(result_shape, new_result_shape)) {
std::vector<int64_t> start_indices(result_shape.dimensions_size(), 0);
std::vector<int64_t> end_indices(result_shape.dimensions().begin(),
result_shape.dimensions().end());
std::vector<int64_t> strides(result_shape.dimensions_size(), 1);
auto* new_conv_result = add(
HloInstruction::CreateGetTupleElement(new_result_shape, new_conv, 0));
auto* empty_temp_buffer =
add(HloInstruction::CreateConstant(LiteralUtil::CreateR1<uint8_t>({})));
auto* sliced_result = add(HloInstruction::CreateSlice(
result_shape, new_conv_result, start_indices, end_indices, strides));
new_conv =
add(HloInstruction::CreateTuple({sliced_result, empty_temp_buffer}));
}
return conv->parent()->ReplaceInstruction(conv, new_conv);
}
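// Collects every cuDNN convolution custom-call in `comp`.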
static std::vector<HloCustomCallInstruction*> GetRelevantConvs(
HloComputation* comp) {
std::vector<HloCustomCallInstruction*> convs;
for (HloInstruction* instr : comp->instructions()) {
if (IsCustomCallToDnnConvolution(*instr)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
return convs;
}
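// Asks `resolve_pad_shapes` for padded operand/result shapes and, if padding
// is warranted, applies it via PadConv.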
static absl::StatusOr<bool> ResolveAndPad(
HloCustomCallInstruction* conv,
std::function<absl::StatusOr<bool>(HloCustomCallInstruction* conv,
std::vector<Shape>* new_input_shapes,
Shape* new_result_shape)>
resolve_pad_shapes) {
std::vector<Shape> new_input_shapes;
Shape new_result_shape;
TF_ASSIGN_OR_RETURN(bool result, resolve_pad_shapes(conv, &new_input_shapes,
&new_result_shape));
if (result) {
TF_RETURN_IF_ERROR(PadConv(conv, new_input_shapes, new_result_shape));
return true;
}
return false;
}
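// For f16 convolutions, rounds the feature dimensions up to a multiple of 8
// so cuDNN can use tensor cores (with a special case padding 3 input features
// to 4), unless that would grow any shape by more than 35%.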
static absl::StatusOr<bool> TryResolvePaddedShapesForTensorCore(
HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr,
Shape* new_result_shape_ptr) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv));
const auto& dnums = conv->convolution_dimension_numbers();
auto* lhs = conv->mutable_operand(0);
auto* rhs = conv->mutable_operand(1);
const Shape& result_shape = conv->shape().tuple_shapes(0);
if (result_shape.element_type() != PrimitiveType::F16) {
return false;
}
if (conv->feature_group_count() > 1 || conv->batch_group_count() > 1) {
VLOG(2) << "Do not pad grouped convolution.";
return false;
}
if (kind == CudnnConvKind::kForwardActivation) {
return false;
}
Shape new_lhs_shape = lhs->shape();
Shape new_rhs_shape = rhs->shape();
Shape& new_result_shape = *new_result_shape_ptr;
new_result_shape = conv->shape().tuple_shapes(0);
Shape* new_input_shape;
Shape* new_filter_shape;
Shape* new_output_shape;
std::tie(new_input_shape, new_filter_shape, new_output_shape) = [&] {
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
return std::make_tuple(&new_lhs_shape, &new_rhs_shape,
&new_result_shape);
case CudnnConvKind::kBackwardInput:
return std::make_tuple(&new_result_shape, &new_rhs_shape,
&new_lhs_shape);
case CudnnConvKind::kBackwardFilter:
return std::make_tuple(&new_lhs_shape, &new_result_shape,
&new_rhs_shape);
}
}();
auto input_features =
new_input_shape->dimensions(dnums.input_feature_dimension());
auto output_features =
new_output_shape->dimensions(dnums.output_feature_dimension());
if (input_features == 3 && (output_features == 32 || output_features == 64)) {
new_input_shape->set_dimensions(dnums.input_feature_dimension(), 4);
new_filter_shape->set_dimensions(dnums.kernel_input_feature_dimension(), 4);
} else {
auto pad_dim = [](Shape* s, int64_t dim) {
s->set_dimensions(dim, RoundUpTo<int64_t>(s->dimensions(dim), 8));
};
pad_dim(new_input_shape, dnums.input_feature_dimension());
pad_dim(new_filter_shape, dnums.kernel_input_feature_dimension());
pad_dim(new_filter_shape, dnums.kernel_output_feature_dimension());
pad_dim(new_output_shape, dnums.output_feature_dimension());
static constexpr double kMaxBytesTouchedBound = 1.35;
auto check_size_increase = [&](const Shape& old_shape,
const Shape& new_shape) {
int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape);
int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape);
if (new_bytes <= old_bytes * kMaxBytesTouchedBound) {
return true;
}
VLOG(3)
<< "Not padding convolution; doing so would change input / result "
"shape from "
<< ShapeUtil::HumanString(old_shape) << " to "
<< ShapeUtil::HumanString(new_shape) << ", a size increase of "
<< new_bytes / static_cast<double>(old_bytes) << "x > "
<< kMaxBytesTouchedBound << "x: " << conv->ToString();
return false;
};
if (!check_size_increase(lhs->shape(), new_lhs_shape) ||
!check_size_increase(rhs->shape(), new_rhs_shape) ||
!check_size_increase(result_shape, new_result_shape)) {
return false;
}
}
if (ShapeUtil::Equal(lhs->shape(), new_lhs_shape) &&
ShapeUtil::Equal(rhs->shape(), new_rhs_shape)) {
VLOG(3) << "No need to pad features of " << conv->ToString();
return false;
}
new_input_shapes_ptr->push_back(new_lhs_shape);
new_input_shapes_ptr->push_back(new_rhs_shape);
return true;
}
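// For integer convolutions, rounds the feature dimensions up to a multiple of
// `pad_to` (accounting for any pre-existing vectorized feature layout) when
// cuDNN supports the optimized kernel, rejecting paddings that would grow the
// input or result by 2x or more.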
absl::StatusOr<bool> TryResolvePaddedShapesForIntegerConvolution(
int pad_to, const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr,
Shape* new_result_shape_ptr) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv));
const Shape& input_shape = conv->operand(0)->shape();
const Shape& kernel_shape = conv->operand(1)->shape();
const Shape& result_shape = conv->shape().tuple_shapes(0);
if (!primitive_util::IsIntegralType(input_shape.element_type())) {
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
return false;
}
const auto& dnums = conv->convolution_dimension_numbers();
std::vector<Shape>& new_input_shapes = *new_input_shapes_ptr;
for (auto operand : conv->operands()) {
new_input_shapes.push_back(operand->shape());
}
Shape& new_result_shape = *new_result_shape_ptr;
new_result_shape = conv->shape().tuple_shapes(0);
std::optional<int64_t> input_vect_dim;
std::optional<int64_t> kernel_vect_dim;
std::optional<int64_t> result_vect_dim;
std::tie(input_vect_dim, kernel_vect_dim, result_vect_dim) =
FindVectorizedFeatureDims(dnums, input_shape, kernel_shape, result_shape);
int64_t input_vect_size =
input_vect_dim.has_value() ? input_shape.dimensions(*input_vect_dim) : 1;
int64_t kernel_vect_size = kernel_vect_dim.has_value()
? kernel_shape.dimensions(*kernel_vect_dim)
: 1;
int64_t result_vect_size = result_vect_dim.has_value()
? result_shape.dimensions(*result_vect_dim)
: 1;
if (pad_to % input_vect_size != 0 || pad_to % kernel_vect_size != 0 ||
pad_to % result_vect_size != 0) {
return false;
}
TF_ASSIGN_OR_RETURN(bool cudnn_supports,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, pad_to));
if (!cudnn_supports) {
return false;
}
{
auto pad_dim = [&](Shape* s, int64_t dim, int64_t cur_vect_size) {
CHECK_EQ(pad_to % cur_vect_size, 0);
s->set_dimensions(
dim, RoundUpTo<int64_t>(s->dimensions(dim), pad_to / cur_vect_size));
};
switch (kind) {
case CudnnConvKind::kForward:
CHECK_EQ(new_input_shapes.size(), 2);
pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(),
input_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(),
kernel_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(),
1);
pad_dim(&new_result_shape, dnums.output_feature_dimension(),
result_vect_size);
break;
case CudnnConvKind::kForwardActivation:
CHECK(new_input_shapes.size() == 3 || new_input_shapes.size() == 4);
pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(),
input_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(),
kernel_vect_size);
pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(),
1);
pad_dim(&new_input_shapes[2], 0, 1);
if (new_input_shapes.size() == 4) {
pad_dim(&new_input_shapes[3], dnums.output_feature_dimension(),
result_vect_size);
}
pad_dim(&new_result_shape, dnums.output_feature_dimension(),
result_vect_size);
break;
default:
CHECK(false);
}
static constexpr double kMaxBytesTouchedBound = 2;
auto check_size_increase = [&](const Shape& old_shape,
const Shape& new_shape) {
int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape);
int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape);
if (new_bytes < old_bytes * kMaxBytesTouchedBound) {
return true;
}
VLOG(3)
<< "Not padding convolution; doing so would change input / result "
"shape from "
<< ShapeUtil::HumanString(old_shape) << " to "
<< ShapeUtil::HumanString(new_shape) << ", a size increase of "
<< new_bytes / static_cast<double>(old_bytes)
<< "x >= " << kMaxBytesTouchedBound << "x: " << conv->ToString();
return false;
};
if (!check_size_increase(conv->operand(0)->shape(), new_input_shapes[0]) ||
!check_size_increase(result_shape, new_result_shape)) {
return false;
}
}
bool changed = false;
for (int64_t i = 0; i < conv->operand_count(); ++i) {
changed |=
!ShapeUtil::Equal(conv->operand(i)->shape(), new_input_shapes[i]);
}
if (!changed) {
VLOG(3) << "No need to pad features of " << conv->ToString();
}
return changed;
}
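// Tries integer padding first (to 32 on compute capability 7.5+, falling back
// to 4), then tensor-core padding for f16 on Volta and newer.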
absl::StatusOr<bool> CudnnPadForConvolutions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
bool local_changed = false;
if (compute_capability_.IsAtLeast(7, 5)) {
TF_ASSIGN_OR_RETURN(
local_changed,
ResolveAndPad(conv, absl::bind_front(
TryResolvePaddedShapesForIntegerConvolution,
32, compute_capability_)));
}
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
ResolveAndPad(conv, absl::bind_front(
TryResolvePaddedShapesForIntegerConvolution,
4, compute_capability_)));
}
changed |= local_changed;
}
if (compute_capability_.IsAtLeast(se::CudaComputeCapability::VOLTA)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
TF_ASSIGN_OR_RETURN(
bool local_changed,
ResolveAndPad(conv, TryResolvePaddedShapesForTensorCore));
changed |= local_changed;
}
}
}
return changed;
}
}
} | #include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = xla::match;
class CudnnPadForConvolutionsTest : public HloTestBase {};
TEST_F(CudnnPadForConvolutionsTest, DoNotPadF16ForwardConvWhenGrouped) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[704,48,1,49]{3,2,1,0} parameter(0)
filter = f16[44,768,1,50]{3,2,1,0} parameter(1)
ROOT result = (f16[1,128,48,768]{3,2,1,0}, u8[0]{0})
custom-call(input, filter)
, window={size=1x50 pad=0_0x64_64}
, dim_labels=fb01_io01->01bf
, feature_group_count=16
, custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
filter = f16[2,2,41,40] parameter(1)
ROOT result = (f16[10,20,30,40], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 48, 40}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,41] parameter(0)
filter = f16[2,2,40,41] parameter(1)
ROOT result = (f16[10,20,30,40], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvBackwardInputCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 40, 48}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,40] parameter(0)
filter = f16[2,2,40,41] parameter(1)
ROOT result = (f16[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardInputCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardFilterCallTarget},
m::Pad(m::Parameter(0), m::Op()), m::Parameter(1)))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,40] parameter(0)
output = f16[10,20,30,41] parameter(1)
result = (f16[2,2,40,41], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,40,41] get-tuple-element(result), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBackwardFilterCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op()))));
}
TEST_F(CudnnPadForConvolutionsTest, PadInputFeatures3To4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,3] parameter(0)
filter = f16[2,2,3,32] parameter(1)
ROOT result = (f16[10,20,30,32], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 4}),
m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 4, 32}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvInputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,40] parameter(1)
ROOT result = (f32[10,20,30,40], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
root,
GmockMatch(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 44}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 44, 40}))));
}
TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op())))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8To32OnSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 64}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 64, 64})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32OnSm70) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32FloatOutputSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,38] parameter(0)
filter = s8[2,2,38,41] parameter(1)
ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 40}),
m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, NoPadInt8UnsupportedFilterTypeOutputSm75) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,38] parameter(0)
filter = f32[2,2,38,41] parameter(1)
ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, NoPadToInt8x32ExcessiveBlowup) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[128,4,48,48] parameter(0)
filter = s8[64,4,3,3] parameter(1)
ROOT result = (f32[128,64,48,48], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41,4] parameter(0)
filter = s8[2,2,41,4,168] parameter(1)
ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget},
m::Pad(m::Parameter(0), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Pad(m::Parameter(1), m::Op())
.WithShape(S8, {2, 2, 48, 4, 192})))
.WithShape(S8, {10, 20, 30, 48, 4})),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32BiasActivation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41,4] parameter(0)
filter = s8[2,2,41,4,168] parameter(1)
bias = f32[10] parameter(2)
side_input = s8[10,20,30,42,4] parameter(3)
ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convBiasActivationForward"
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(
m::GetTupleElement(
m::CustomCall(
{kCudnnConvBiasActivationForwardCallTarget},
m::Pad(m::Parameter(0), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Pad(m::Parameter(1), m::Op())
.WithShape(S8, {2, 2, 48, 4, 192}),
m::Pad(m::Parameter(2), m::Op()).WithShape(F32, {32}),
m::Pad(m::Parameter(3), m::Op())
.WithShape(S8, {10, 20, 30, 48, 4})))
.WithShape(S8, {10, 20, 30, 48, 4})),
m::Op())));
}
TEST_F(CudnnPadForConvolutionsTest,
PadIntFusedForwardConvInputAndOutputChannels) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule Test
ENTRY %Test (input: s8[1,3,3,2], filter: s8[3,3,2,5], side_input: s8[1,3,3,5], bias: s8[5]) -> f32[1,3,3,5] {
%input = s8[1,3,3,3]{3,2,1,0} parameter(0)
%filter = s8[3,3,2,5]{3,2,1,0} parameter(1)
%bias = s8[5]{0} parameter(3)
%convert = f32[5]{0} convert(s8[5]{0} %bias)
%side_input = f32[1,3,3,5]{3,2,1,0} parameter(2)
%custom-call.1 = (f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,3,3,3]{3,2,1,0} %input, s8[3,3,2,5]{3,2,1,0} %filter, f32[5]{0} %convert, f32[1,3,3,5]{3,2,1,0} %side_input), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward", backend_config="{\"activationMode\":\"2\",\"convResultScale\":1,\"sideInputScale\":1}"
ROOT %get-tuple-element.1 = f32[1,3,3,5]{3,2,1,0} get-tuple-element((f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) %custom-call.1), index=0
})")
.value();
EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple(
m::Slice(m::GetTupleElement(m::CustomCall(
{kCudnnConvBiasActivationForwardCallTarget},
m::Pad(m::Parameter(0), m::Op()),
m::Pad(m::Parameter(1), m::Op()),
m::Pad(m::Convert(m::Parameter(3)), m::Op()),
m::Pad(m::Parameter(2), m::Op())))),
m::Op()))));
}
}
}
} | 2,041 |
#ifndef XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_
#define XLA_SERVICE_GPU_GPU_HLO_SCHEDULE_H_
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace gpu {
absl::Status IsProfileApplicable(
const HloModule* module,
const tensorflow::profiler::ProfiledInstructionsProto& profile);
struct ScheduleMetadata {
int64_t scheduler_mem_limit;
};
absl::StatusOr<ScheduleMetadata> ScheduleGpuModule(
HloModule* module, int64_t pointer_size,
const se::DeviceDescription& gpu_device_info);
HloInstructionSequence PostProcessSchedule(const HloInstructionSequence& input);
constexpr absl::string_view kFingerprintBeforeLHS = "fingerprint_before_lhs";
}
}
#endif
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/buffer_value.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_schedule_postprocessing.h"
#include "xla/service/gpu/model/analytical_latency_estimator.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/service/p2p_schedule_preparation.h"
#include "xla/service/profile_guided_latency_estimator.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
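// An instruction should be scheduled as early as possible if it starts an
// asynchronous collective (and was not forced to be synchronous) or is a
// custom call explicitly annotated with SCHEDULE_EARLIEST.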
bool ShouldScheduleAsEarlyAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kCollectivePermuteStart:
return !IsSyncCollective(&instr);
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() ==
CustomCallSchedule::SCHEDULE_EARLIEST;
default:
return false;
}
}
bool ShouldScheduleSuccessor(const HloInstruction& successor,
                             const HloPredicate& is_scheduled) {
  return ShouldScheduleAsEarlyAsPossible(successor) &&
         absl::c_all_of(successor.operands(), is_scheduled) &&
         absl::c_all_of(successor.control_predecessors(), is_scheduled);
}
bool ShouldScheduleAsLateAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceDone:
case HloOpcode::kCollectivePermuteDone:
return ShouldScheduleAsEarlyAsPossible(*instr.operand(0));
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() == CustomCallSchedule::SCHEDULE_LATEST;
default:
return false;
}
}
bool ShouldSchedulePredecessor(const HloInstruction& predecessor,
const HloPredicate& is_scheduled) {
return ShouldScheduleAsLateAsPossible(predecessor) &&
absl::c_all_of(predecessor.users(), is_scheduled) &&
absl::c_all_of(predecessor.control_successors(), is_scheduled);
}
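// Post-processes a schedule in two passes: a forward pass that hoists
// schedule-earliest instructions up to right after their last dependency,
// followed by a backward pass that sinks schedule-latest instructions down
// to right before their first user. The schedule size is unchanged.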
HloInstructionSequence PostprocessorToScheduleAsEarlyOrLateAsPossible(
const HloInstructionSequence& input) {
std::vector<HloInstruction*> earliest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
earliest_scheduled.push_back(instr);
scheduled.insert(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_scheduled(instr)) continue;
add_to_schedule(instr);
for (HloInstruction* user : instr->users()) {
if (is_scheduled(user)) continue;
if (ShouldScheduleSuccessor(*user, is_scheduled)) {
add_to_schedule(user);
}
}
for (HloInstruction* successor : instr->control_successors()) {
if (is_scheduled(successor)) continue;
if (ShouldScheduleSuccessor(*successor, is_scheduled)) {
add_to_schedule(successor);
}
}
}
}
std::deque<HloInstruction*> latest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
latest_scheduled.push_front(instr);
scheduled.insert(instr);
};
for (auto it = earliest_scheduled.rbegin(); it != earliest_scheduled.rend();
it++) {
if (is_scheduled(*it)) continue;
add_to_schedule(*it);
for (HloInstruction* operand : (*it)->operands()) {
if (is_scheduled(operand)) continue;
if (ShouldSchedulePredecessor(*operand, is_scheduled)) {
add_to_schedule(operand);
}
}
for (HloInstruction* predecessor : (*it)->control_predecessors()) {
if (is_scheduled(predecessor)) continue;
if (ShouldSchedulePredecessor(*predecessor, is_scheduled)) {
add_to_schedule(predecessor);
}
}
}
}
HloInstructionSequence result;
absl::c_for_each(latest_scheduled,
[&](HloInstruction* i) { result.push_back(i); });
  CHECK(input.instructions().size() == result.size())
      << "schedule-as-early-or-late post-processing changed schedule size "
         "from "
      << input.instructions().size() << " to " << result.size();
return result;
}
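// Post-processes a schedule so that each synchronous collective start is
// placed immediately before its matching done op, leaving no unrelated work
// scheduled between the pair.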
HloInstructionSequence PostprocessorToScheduleSyncCollectives(
const HloInstructionSequence& input) {
HloInstructionSequence result;
auto is_sync_start = [](const HloInstruction* instr) {
return hlo_query::IsAsyncCollectiveStartOp(instr,
true) &&
IsSyncCollective(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_sync_start(instr)) continue;
if (hlo_query::IsAsyncCollectiveDoneOp(instr, true)) {
HloInstruction* start = instr->mutable_operand(0);
if (is_sync_start(start)) result.push_back(start);
}
result.push_back(instr);
}
CHECK(input.instructions().size() == result.size())
<< "sync collectives post-processing changed schedule size from "
<< input.instructions().size() << " to " << result.size();
return result;
}
absl::StatusOr<HloSchedule> ScheduleGpuModuleWithMemoryScheduler(
const HloModule* module, int64_t pointer_size) {
return ScheduleModule(
module,
[pointer_size](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size);
},
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler,
PostProcessSchedule));
}
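// Builds the latency-hiding scheduler configuration: overlap at most one
// collective of each kind at a time, schedule send/recvs, and bound memory
// pressure by `memory_limit`.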
SchedulerConfig GetSchedulerConfig(int64_t memory_limit) {
SchedulerConfig config;
config.all_reduce_overlap_limit = 1;
config.collective_broadcast_overlap_limit = 1;
config.collective_permute_overlap_limit = 1;
config.use_real_cost_model = false;
config.aggressive_scheduling_policies = true;
config.schedule_send_recvs = true;
config.memory_limit = memory_limit;
return config;
}
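// Extracts the cost entries relevant to `fingerprint`: entries named
// "<fingerprint>::<name>" are renamed to "<name>", entries scoped to other
// fingerprints are dropped, and rematerialization clones ("<name>.rematN")
// are merged into a single entry whose cost is the average over the clones.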
tensorflow::profiler::ProfiledInstructionsProto GetProfileForFingerprint(
tensorflow::profiler::ProfiledInstructionsProto& profile,
const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto result;
bool merge_remat_clones = false;
for (const auto& cost : profile.costs()) {
absl::string_view cost_name = cost.name();
std::string new_cost_name = cost.name();
absl::string_view cost_sep = "::";
if (absl::StrContains(cost_name, cost_sep)) {
std::vector<std::string> split_names =
absl::StrSplit(cost_name, cost_sep);
if (split_names.size() != 2 || split_names[0] != fingerprint) {
continue;
}
new_cost_name = split_names[1];
}
merge_remat_clones |= absl::StrContains(new_cost_name, ".remat");
auto* new_cost = result.add_costs();
new_cost->set_cost_us(cost.cost_us());
new_cost->set_name(new_cost_name);
}
if (!merge_remat_clones) {
return result;
}
auto strip_remat_suffix = [](absl::string_view name) -> absl::string_view {
absl::string_view suffix = ".remat";
size_t index = name.rfind(suffix);
if (index == std::string::npos) {
return name;
}
auto after_suffix = name.substr(index + suffix.size());
int64_t numeric_suffix;
if (after_suffix.empty() ||
absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
return name.substr(0, index);
}
return name;
};
absl::flat_hash_map<absl::string_view, std::pair<double, int64_t>> costs;
for (const auto& cost : result.costs()) {
std::pair<double, int64_t>& data = costs[strip_remat_suffix(cost.name())];
data.first += cost.cost_us();
data.second++;
}
tensorflow::profiler::ProfiledInstructionsProto merged_result;
for (const auto& cost : costs) {
auto* new_cost = merged_result.add_costs();
double average = cost.second.first / cost.second.second;
new_cost->set_cost_us(average);
new_cost->set_name(std::string(cost.first));
}
return merged_result;
}
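// Attempts to load a profile-guided latency estimation (PGLE) profile for
// the module: first from the embedded fdo_profile (binary proto, then text
// proto), then from --xla_gpu_pgle_profile_file_or_directory_path. If that
// path is a directory, "<dir>/<fingerprint>.pbtxt" and then ".pb" are tried;
// otherwise the file is parsed according to its extension.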
std::optional<tensorflow::profiler::ProfiledInstructionsProto> ReadPGLEProfile(
const HloModule* module, const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile;
absl::string_view fdo_profile = module->config().fdo_profile();
if (!fdo_profile.empty()) {
if (tsl::ParseProtoUnlimited(&profile, fdo_profile.data(),
fdo_profile.size())) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (binary)";
return GetProfileForFingerprint(profile, fingerprint);
}
profile.Clear();
if (tsl::protobuf::TextFormat::ParseFromString(std::string(fdo_profile),
&profile)) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (text)";
return GetProfileForFingerprint(profile, fingerprint);
}
LOG(ERROR) << "Unable to prase FDO profile: not a valid text or binary "
"ProfiledInstructionsProto";
}
const std::string& pgle_profile_file_or_dir_path =
module->config()
.debug_options()
.xla_gpu_pgle_profile_file_or_directory_path();
if (pgle_profile_file_or_dir_path.empty()) {
return std::nullopt;
}
tsl::Env* env = tsl::Env::Default();
auto read_text_or_binary_profile = [&profile, env, &fingerprint](
const std::string& text_path,
const std::string& binary_path)
-> std::optional<tensorflow::profiler::ProfiledInstructionsProto> {
if (env->FileExists(text_path).ok()) {
absl::Status s = tsl::ReadTextProto(env, text_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << text_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE text proto from " << text_path
<< ": " << s.message();
}
profile.Clear();
}
if (env->FileExists(binary_path).ok()) {
absl::Status s = tsl::ReadBinaryProto(env, binary_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << binary_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE binary proto from " << binary_path
<< ": " << s.message();
}
profile.Clear();
}
return std::nullopt;
};
if (env->IsDirectory(pgle_profile_file_or_dir_path).ok()) {
std::string pgle_profile_path_prefix =
pgle_profile_file_or_dir_path + "/" + fingerprint;
return read_text_or_binary_profile(pgle_profile_path_prefix + ".pbtxt",
pgle_profile_path_prefix + ".pb");
}
auto extension = tsl::io::Extension(pgle_profile_file_or_dir_path);
if (extension == "pbtxt") {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path, "");
} else if (extension == "pb") {
return read_text_or_binary_profile("", pgle_profile_file_or_dir_path);
} else {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path,
pgle_profile_file_or_dir_path);
}
}
}
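// Verifies that every cost and latency entry in `profile` names an
// instruction that actually exists in `module`; otherwise returns an
// InvalidArgument error listing the missing names.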
absl::Status IsProfileApplicable(
const HloModule* module,
const tensorflow::profiler::ProfiledInstructionsProto& profile) {
absl::flat_hash_set<absl::string_view> all_instruction_names;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : comp->instructions()) {
all_instruction_names.insert(instr->name());
}
}
std::vector<std::string> missing_costs_names;
for (const auto& cost : profile.costs()) {
if (!all_instruction_names.contains(cost.name())) {
missing_costs_names.push_back(cost.name());
}
}
std::vector<std::string> missing_latency_names;
for (const auto& latency : profile.latencies()) {
if (!all_instruction_names.contains(latency.source())) {
missing_latency_names.push_back(latency.source());
}
if (!all_instruction_names.contains(latency.target())) {
missing_latency_names.push_back(latency.target());
}
}
if (!(missing_costs_names.empty() && missing_latency_names.empty())) {
return absl::InvalidArgumentError(
absl::StrFormat("\nMissing costs: %s;\nMissing latencies: %s",
absl::StrJoin(missing_costs_names, ", "),
absl::StrJoin(missing_latency_names, ", ")));
}
return absl::OkStatus();
}
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size);
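// Schedules the module: first with the memory-minimizing scheduler, then --
// if the latency-hiding scheduler is enabled -- reschedules using a
// profile-guided, analytical, or default GPU latency estimator. The module
// fingerprint is recorded in the frontend attributes before the LHS runs so
// PGLE profiles can be matched against it later.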
absl::StatusOr<ScheduleMetadata> ScheduleGpuModule(
HloModule* module, int64_t pointer_size,
const se::DeviceDescription& gpu_device_info) {
int64_t memory_limit =
GetSchedulerMemoryLimit(module, gpu_device_info, pointer_size);
if (module->has_schedule()) {
return ScheduleMetadata{memory_limit};
}
HloPassPipeline prepare_pipeline("p2p-schedule-preparation");
prepare_pipeline.AddPass<P2PSchedulePreparation>();
TF_RETURN_IF_ERROR(prepare_pipeline.Run(module).status());
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleGpuModuleWithMemoryScheduler(module, pointer_size));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
std::string fingerprint = module->GetFingerprint128(
HloPrintOptions::Canonical().set_print_backend_config(true));
FrontendAttributes attributes;
(*attributes.mutable_map())[std::string(kFingerprintBeforeLHS)] = fingerprint;
module->add_frontend_attributes(attributes);
VLOG(1) << "Fingerprint before LHS for module " << module->name() << "("
<< module->unique_id() << ") = " << fingerprint;
const bool enable_latency_hiding_scheduler =
module->config()
.debug_options()
.xla_gpu_enable_latency_hiding_scheduler();
if (!enable_latency_hiding_scheduler) {
return ScheduleMetadata{memory_limit};
}
SchedulerConfig config = GetSchedulerConfig(memory_limit);
auto gpu_latency_estimator =
std::make_unique<GpuLatencyEstimator>(pointer_size);
std::unique_ptr<LatencyEstimator> latency_estimator;
std::optional<tensorflow::profiler::ProfiledInstructionsProto> profile =
ReadPGLEProfile(module, fingerprint);
const bool enable_analytical_latency_estimator =
module->config()
.debug_options()
.xla_gpu_enable_analytical_latency_estimator();
if (profile.has_value()) {
latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
config, std::move(gpu_latency_estimator), profile.value());
LOG(INFO)
<< "Found profile, using profile guided latency estimator. Profile:\n"
<< profile->DebugString();
absl::Status s = IsProfileApplicable(module, profile.value());
if (!s.ok()) {
LOG(INFO) << "PGLE profile may not applicable to the module, but will "
"still be used : "
<< s.message();
}
} else if (enable_analytical_latency_estimator) {
latency_estimator = std::make_unique<AnalyticalLatencyEstimator>(
config, std::move(gpu_latency_estimator), gpu_device_info,
[input_pointer_size = pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, input_pointer_size);
},
module->entry_computation());
LOG(INFO) << "Using analytical latency estimator";
} else {
latency_estimator = std::move(gpu_latency_estimator);
}
auto async_tracker = [&]() -> std::unique_ptr<AsyncTracker> {
return module->config()
.debug_options()
.xla_gpu_lhs_enable_gpu_async_tracker()
? std::make_unique<GpuAsyncTracker>(config)
: std::make_unique<GpuAsyncTrackerBase>(config);
}();
auto shape_size_in_bytes = [pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, pointer_size);
};
HloPassPipeline pipeline("latency-hiding-scheduler");
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_in_bytes, async_tracker.get(), latency_estimator.get(),
config);
pipeline.AddPass<LatencyHidingScheduler>(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_in_bytes);
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
HloPassPipeline postprocessing_pipeline("gpu-schedule-postprocessing");
postprocessing_pipeline.AddPass<GpuSchedulePostprocessing>();
TF_RETURN_IF_ERROR(postprocessing_pipeline.Run(module).status());
return ScheduleMetadata{memory_limit};
}
HloInstructionSequence PostProcessSchedule(
const HloInstructionSequence& input) {
HloInstructionSequence result = PostprocessorToScheduleSyncCollectives(input);
return PostprocessorToScheduleAsEarlyOrLateAsPossible(result);
}
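// Computes the memory limit handed to the scheduler: the configured device
// memory size (defaulting to 80% of the physical device memory), minus the
// unaliased entry-computation I/O, scaled by the memory-limit slop factor.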
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size) {
const int64_t base_limit =
module->config().device_memory_size() != 0
? module->config().device_memory_size()
: gpu_device_info.device_memory_size() * 80 / 100;
int64_t total_io_size = 0;
for (HloInstruction* param :
module->entry_computation()->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
param->shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
}
ShapeUtil::ForEachSubshape(
module->result_shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
module->input_output_alias_config().ForEachAlias(
[&](const ShapeIndex& output_index,
const HloInputOutputAliasConfig::Alias&) {
const Shape& subshape =
ShapeUtil::GetSubshape(module->result_shape(), output_index);
total_io_size -= GetSizeOfShape(subshape, pointer_size);
});
int64_t limit =
(base_limit - total_io_size) *
module->config().debug_options().xla_gpu_memory_limit_slop_factor() / 100;
return limit;
}
}
} | #include "xla/service/gpu/gpu_hlo_schedule.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/backend.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace gpu {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class GpuHloScheduleTest : public HloTestBase {
protected:
using HloVec = std::vector<HloInstruction*>;
Shape f32_2x2_ = ShapeUtil::MakeShape(F32, {2, 2});
SequentialHloOrdering BuildHloOrdering(HloModule* module) {
Backend& test_backend = backend();
const se::DeviceDescription& gpu_device_info =
test_backend.default_stream_executor()->GetDeviceDescription();
TF_CHECK_OK(ScheduleGpuModule(module, 8, gpu_device_info)
.status());
return SequentialHloOrdering{module->schedule()};
}
HloModuleConfig GetModuleConfig(bool enable_latency_hiding_scheduler,
bool enable_gpu_async_tracker = false,
absl::string_view fdo_profile = "") {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(
enable_latency_hiding_scheduler);
debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(
enable_gpu_async_tracker);
config.set_debug_options(debug_options);
*config.mutable_fdo_profile() = fdo_profile;
return config;
}
std::unique_ptr<HloModule> CreateNewVerifiedModule(
bool enable_latency_hiding_scheduler = false) {
return std::make_unique<HloModule>(
"test_module", GetModuleConfig(enable_latency_hiding_scheduler));
}
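  // The fingerprint stored before LHS is a 128-bit hash rendered as hex,
  // i.e. 128 / 4 = 32 characters.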
static bool HasValidFingerprint(HloModule* module) {
const FrontendAttributes& attrs = module->frontend_attributes();
auto it = attrs.map().find(kFingerprintBeforeLHS);
return it != attrs.map().end() && it->second.size() == 128 / 4;
}
};
TEST_F(GpuHloScheduleTest, SequentialMatMul) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* dot1 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, x, y));
HloInstruction* dot2 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, dot1, z));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(dot2));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot2));
EXPECT_TRUE(order.ExecutesBefore(dot1, dot2));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, SequentialAdd) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, y, z));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add3));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add2));
EXPECT_TRUE(order.ExecutesBefore(add1, add2));
EXPECT_TRUE(order.ExecutesBefore(add2, add3));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCustomCall) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
HloInstruction* nonblocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {add0},
"nonblocking-call-start",
""));
static_cast<HloCustomCallInstruction*>(nonblocking_call)
->set_custom_call_schedule(SCHEDULE_EARLIEST);
TF_CHECK_OK(add1->AddControlDependencyTo(nonblocking_call));
HloInstruction* blocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {nonblocking_call},
"blocking-call-done",
""));
static_cast<HloCustomCallInstruction*>(blocking_call)
->set_custom_call_schedule(SCHEDULE_LATEST);
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, blocking_call));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(add1, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add2));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add3));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, blocking_call));
EXPECT_TRUE(order.ExecutesBefore(blocking_call, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCollectivePermute) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
Shape u32_scalar = ShapeUtil::MakeShape(U32, {});
Shape collective_permute_start_shape =
ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_});
HloInstruction* collective_permute_start =
builder.AddInstruction(HloInstruction::CreateCollectivePermuteStart(
collective_permute_start_shape, add0,
{{0, 1}}, std::nullopt));
TF_CHECK_OK(add1->AddControlDependencyTo(collective_permute_start));
HloInstruction* collective_permute_done = builder.AddInstruction(
HloInstruction::CreateUnary(f32_2x2_, HloOpcode::kCollectivePermuteDone,
collective_permute_start));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, collective_permute_done));
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(add1, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add2));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add3));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, collective_permute_done));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_done, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add5)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 2);
EXPECT_GT(count_between_pairs[0], 0);
EXPECT_GT(count_between_pairs[1], 0);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSCostModelCostlyAR) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT apply_op = bf16[] add(x, y)
}
ENTRY ar {
p0 = bf16[32505856] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = bf16[32505856] all-reduce-start(p0), to_apply=apply_op
ar-done = bf16[32505856] all-reduce-done(ar-start)
ROOT t = (bf16[32505856], f32[32,32]) tuple(ar-done, dot6)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 1);
EXPECT_EQ(count_between_pairs[0], 7);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
struct SubTest {
std::string profile;
std::string target_start, target_done;
};
std::vector<SubTest> subtests;
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
costs { name: "ar-start1" cost_us: 10.0 }
)pb";
subtests.push_back({ar_long_latency_proto_text, "ar-start", "ar-done"});
const std::string ar1_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 10.0 }
costs { name: "ar-start1" cost_us: 1000.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar1_long_latency_proto_text, &profile));
std::string ar1_long_latency_proto_binary = profile.SerializeAsString();
subtests.push_back({profile.SerializeAsString(), "ar-start1", "ar-done1"});
for (const SubTest& subtest : subtests) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true,
subtest.profile)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == subtest.target_start) {
between_target_collective_pair = true;
} else if (inst->name() == subtest.target_done) {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
}
TEST_F(GpuHloScheduleTest,
ProfileGuidedCostModelApplicabilityListsMissingCostsAndLatencies) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, dot0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 10.0 }
costs { name: "ar-start-2" cost_us: 10.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar_long_latency_proto_text, &profile));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(
hlo_text,
GetModuleConfig(true,
true,
ar_long_latency_proto_text)));
absl::Status result = IsProfileApplicable(module.get(), profile);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(result.message(), HasSubstr("add0"));
EXPECT_THAT(result.message(), HasSubstr("dot1"));
EXPECT_THAT(result.message(), HasSubstr("ar-start-2"));
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithRematData) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1.0 }
costs { name: "ar-start1" cost_us: 1.0 }
costs { name: "ar-start.remat100" cost_us: 2000.0 }
)pb";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text,
GetModuleConfig(true,
true,
ar_long_latency_proto_text)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == "ar-start") {
between_target_collective_pair = true;
} else if (inst->name() == "ar-done") {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
TEST_F(GpuHloScheduleTest, LHSSendRecv) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1
send-done = token[] send-done(send), channel_id=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_LT(get_index("recv"), get_index("send"));
EXPECT_LT(get_index("send"), get_index("recv-done"));
EXPECT_GE(get_index("send-done") - get_index("recv-done"), 8);
EXPECT_LT(abs(get_index("send-done") - get_index("result")), 2);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSSendRecvPairs2) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all-0 = token[] after-all()
recv-0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send-0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-0),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done-0 = (f32[1, 1024, 1024], token[]) recv-done(recv-0), channel_id=1
send-done-0 = token[] send-done(send-0), channel_id=1
recv-data-0 = f32[1, 1024, 1024] get-tuple-element(recv-done-0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
bc1 = f32[1, 1024, 1024] broadcast(conv), dimensions={}
after-all-1 = token[] after-all()
recv-1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
send-1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
recv-done-1 = (f32[1, 1024, 1024], token[]) recv-done(recv-1), channel_id=2
send-done-1 = token[] send-done(send-1), channel_id=2
recv-data-1 = f32[1, 1024, 1024] get-tuple-element(recv-done-1), index=0
add2 = f32[1, 1024, 1024] add(recv-data-0, bc1)
add = f32[1, 1024, 1024] add(recv-data-1, add2)
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, add)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(hlo_text, GetModuleConfig(true, true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_TRUE(HasValidFingerprint(module.get()));
EXPECT_LT(get_index("recv-1"), get_index("send-1"));
EXPECT_LT(get_index("send-1"), get_index("recv-done-1"));
EXPECT_GT(get_index("send-done-1"), get_index("send-1"));
EXPECT_LT(get_index("send-done-1"), get_index("recv-0"));
EXPECT_LT(std::abs(get_index("send-done-0") - get_index("result")), 2);
}
TEST_F(GpuHloScheduleTest, LHSSendRecvAllReduce) {
const char* hlo_text = R"(
HloModule test
add (x: f32[], y: f32[]) -> f32[] {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(f32[] x, f32[] y)
}
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
} | 2,042 |
#ifndef XLA_SERVICE_GPU_HLO_TRAVERSAL_H_
#define XLA_SERVICE_GPU_HLO_TRAVERSAL_H_
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
class HloFusionAdaptor;
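// Wraps an HloInstruction together with the HloFusionAdaptor that owns it,
// so that operand and user edges can be resolved across fusion boundaries
// (see GetOperand/GetOperands/GetUsers below).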
class HloInstructionAdaptor {
public:
HloInstructionAdaptor() = default;
HloInstructionAdaptor(const HloInstruction& instruction,
const HloFusionAdaptor* parent)
: instruction_(&instruction), parent_(parent) {}
HloOpcode opcode() const { return instruction_->opcode(); }
absl::string_view name() const { return instruction_->name(); }
HloInstructionAdaptor GetOperand(int index) const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetOperands() const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetUsers() const;
const xla::Shape& shape() const { return instruction_->shape(); }
std::string ToString() const { return instruction_->ToString(); }
friend bool operator==(const HloInstructionAdaptor& lhs,
const HloInstructionAdaptor& rhs);
template <typename H>
friend H AbslHashValue(H h, const HloInstructionAdaptor& m);
const HloInstruction& instruction() const { return *instruction_; }
const HloFusionAdaptor& parent() const { return *parent_; }
private:
const HloInstruction* instruction_;
const HloFusionAdaptor* parent_;
};
template <typename H>
H AbslHashValue(H h, const HloInstructionAdaptor& m) {
return H::combine(std::move(h), m.instruction_->GetModule(),
m.instruction_->unique_id());
}
template <HloOpcode op, HloOpcode... rest>
bool IsOpcodeAnyOf(const HloInstruction* instr) {
return (instr->opcode() == op) || ((instr->opcode() == rest) || ...);
}
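// Usage sketch (illustrative only; `instr` is any HloInstruction*):
//   if (IsOpcodeAnyOf<HloOpcode::kBitcast, HloOpcode::kCopy>(instr)) {
//     // `instr` is a bitcast or a copy.
//   }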
namespace internal {
class HloFusionInstructionAdaptor {
public:
virtual ~HloFusionInstructionAdaptor() = default;
virtual bool ContainsInstruction(const HloInstruction* instruction) const = 0;
virtual absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const = 0;
virtual absl::InlinedVector<const HloInstruction*, 2> GetParameters()
const = 0;
virtual const HloInstruction& FusionInstruction() const = 0;
virtual absl::InlinedVector<HloInstructionAdaptor, 2>
MakeInstructionPostOrder() const = 0;
virtual std::string ToString() const = 0;
};
}
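// Presents a fusion computation, a standalone instruction, or a
// producer/consumer pair as a single fusion graph with common roots and
// parameters; construct one via the ForInstruction, ForProducerConsumer or
// ForComputation factories below.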
class HloFusionAdaptor {
public:
bool ContainsInstruction(HloInstructionAdaptor instruction) const;
bool ContainsInstruction(const HloInstruction* instruction) const;
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const;
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const;
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const;
std::string ToString() const;
static std::unique_ptr<HloFusionAdaptor> ForInstruction(
const HloInstruction* instruction);
static std::unique_ptr<HloFusionAdaptor> ForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer);
static std::unique_ptr<HloFusionAdaptor> ForComputation(
const HloComputation* computation);
private:
void AddInstruction(const HloInstruction* instruction);
void AddComputation(const HloComputation* computation);
absl::InlinedVector<std::unique_ptr<internal::HloFusionInstructionAdaptor>, 2>
fusion_instructions_;
};
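// Value returned by the visit callback of the BFS traversals below:
// kAdvance also visits the node's neighbors, kInterrupt stops the whole
// traversal, and kSkip continues the traversal without visiting this node's
// neighbors.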
enum class TraversalResult {
kAdvance,
kInterrupt,
kSkip,
};
void HloBfsConsumersFirstTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg =
[](HloInstructionAdaptor) {});
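// Usage sketch for a consumers-first traversal (mirrors the accompanying
// unit tests; `fusion_instr` is an illustrative HloInstruction*):
//   auto fusion = HloFusionAdaptor::ForInstruction(fusion_instr);
//   HloBfsConsumersFirstTraversal(
//       fusion->GetRoots(), *fusion,
//       [&](HloInstructionAdaptor node) {
//         // ... visit `node` ...
//         return TraversalResult::kAdvance;
//       },
//       [&](HloInstructionAdaptor arg) {
//         // `arg` is an operand outside the fusion.
//       });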
void HloBfsProducersFirstTraversal(
absl::Span<const HloInstructionAdaptor> producers,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node);
bool HloAnyOf(absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands = true);
bool HloAnyOf(absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
std::optional<HloInstructionAdaptor> HloFindIf(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands = true);
std::optional<const HloInstruction*> HloFindIf(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
std::vector<const HloInstruction*> HloFindAll(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands = true);
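// Returns the chain of users leading from `parent` to `root` (both
// inclusive), or an empty vector if `root` is not reachable from `parent`
// through user edges.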
std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent,
HloInstructionAdaptor root);
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_HLO_TRAVERSAL_H_
#include "xla/service/gpu/hlo_traversal.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace gpu {
namespace {
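// Invokes `fn` for every effective user of `value` reached through `user`,
// looking through fusion boundaries: a use by a root tuple is forwarded to
// the users of the enclosing fusion's get-tuple-elements, and a use by a
// fusion instruction contained in the adaptor is forwarded to the users of
// the corresponding fused parameter.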
template <typename F>
void ResolveUsers(const HloInstruction* value, const HloInstruction* user,
const HloFusionAdaptor& fusion_adaptor, F&& fn) {
if (user->opcode() == HloOpcode::kTuple && user->IsRoot()) {
if (auto* fusion = user->parent()->FusionInstruction()) {
for (const auto* gte : fusion->users()) {
if (gte->opcode() != HloOpcode::kGetTupleElement) {
fn(gte);
continue;
}
for (const auto* gte_user : gte->users()) {
ResolveUsers(gte, gte_user, fusion_adaptor, fn);
}
}
}
} else if (fusion_adaptor.ContainsInstruction(user) &&
user->opcode() == HloOpcode::kFusion) {
auto* param = user->fused_parameter(user->operand_index(value));
for (const auto* param_user : param->users()) {
fn(param_user);
}
} else {
fn(user);
}
}
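// Resolves `operand` through fusion boundaries: a get-tuple-element of a
// fusion with a tuple root is replaced by the matching tuple operand, a
// fusion contained in the adaptor by its fused expression root, and a fused
// parameter by the corresponding operand of the enclosing fusion.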
const HloInstruction* ResolveOperand(const HloInstruction* operand,
const HloFusionAdaptor& fusion_adaptor) {
if (operand->opcode() == HloOpcode::kGetTupleElement &&
operand->operand(0)->opcode() == HloOpcode::kFusion &&
operand->operand(0)->fused_expression_root()->opcode() ==
HloOpcode::kTuple &&
fusion_adaptor.ContainsInstruction(operand->operand(0))) {
return operand->operand(0)->fused_expression_root()->operand(
operand->tuple_index());
}
if (!fusion_adaptor.ContainsInstruction(operand)) {
return operand;
}
if (operand->opcode() == HloOpcode::kFusion) {
return operand->fused_expression_root();
}
if (operand->opcode() == HloOpcode::kParameter) {
if (auto* fusion = operand->parent()->FusionInstruction()) {
return ResolveOperand(fusion->operand(operand->parameter_number()),
fusion_adaptor);
}
}
return operand;
}
}
class SingleInstructionFusion : public internal::HloFusionInstructionAdaptor {
public:
explicit SingleInstructionFusion(const HloInstruction* instruction,
const HloFusionAdaptor* parent)
: instruction_(instruction), parent_(parent) {
CHECK_NE(instruction->opcode(), HloOpcode::kFusion)
<< "Use HloComputationFusion";
}
bool ContainsInstruction(const HloInstruction* instruction) const override {
return instruction == instruction_;
}
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
return {HloInstructionAdaptor{*instruction_, parent_}};
}
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
const auto& operands = instruction_->operands();
return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
operands.end());
}
const HloInstruction& FusionInstruction() const override {
return *instruction_;
}
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const override {
return {HloInstructionAdaptor{*instruction_, parent_}};
}
std::string ToString() const override { return instruction_->ToString(); }
private:
const HloInstruction* instruction_;
const HloFusionAdaptor* parent_;
};
class HloComputationFusion : public internal::HloFusionInstructionAdaptor {
public:
explicit HloComputationFusion(const HloComputation* computation,
const HloFusionAdaptor* parent)
: computation_(computation), parent_(parent) {
CHECK(computation->IsFusionComputation());
roots_ = FindRoots(computation);
}
absl::InlinedVector<HloInstructionAdaptor, 2> FindRoots(
const HloComputation* computation) {
absl::InlinedVector<HloInstructionAdaptor, 2> roots;
std::function<void(const HloInstruction*)> get_roots;
get_roots = [&](const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kTuple) {
for (const auto* operand : instr->operands()) {
get_roots(operand);
}
} else {
HloInstructionAdaptor wrapped{*instr, parent_};
roots.push_back(wrapped);
}
};
get_roots(computation->root_instruction());
return roots;
}
bool ContainsInstruction(const HloInstruction* instruction) const override {
return instruction->parent() == computation_ ||
instruction == computation_->FusionInstruction();
}
absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
CHECK(!roots_.empty())
<< "No roots found in the computation. HloFusionAdaptor was likely "
"created for a non-fusion computation: "
<< computation_->ToString();
return roots_;
}
absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
const auto& operands = computation_->FusionInstruction()->operands();
return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
operands.end());
}
const HloInstruction& FusionInstruction() const override {
return *computation_->FusionInstruction();
}
absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
const override {
auto post_order = computation_->MakeInstructionPostOrder();
absl::InlinedVector<HloInstructionAdaptor, 2> result;
result.reserve(post_order.size() - computation_->num_parameters());
for (auto* instr : post_order) {
if (instr->opcode() == HloOpcode::kParameter ||
(instr->opcode() == HloOpcode::kTuple && instr->IsRoot())) {
continue;
}
result.emplace_back(*instr, parent_);
}
return result;
}
std::string ToString() const override { return computation_->ToString(); }
private:
const HloComputation* computation_;
absl::InlinedVector<HloInstructionAdaptor, 2> roots_;
const HloFusionAdaptor* parent_;
};
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForInstruction(
const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
return ForComputation(instruction->fused_instructions_computation());
}
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddInstruction(instruction);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer) {
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddInstruction(producer);
fusion_adaptor->AddInstruction(consumer);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForComputation(
const HloComputation* computation) {
auto fusion_adaptor = std::make_unique<HloFusionAdaptor>();
fusion_adaptor->AddComputation(computation);
return fusion_adaptor;
}
bool HloFusionAdaptor::ContainsInstruction(
HloInstructionAdaptor instruction) const {
return ContainsInstruction(&instruction.instruction());
}
bool HloFusionAdaptor::ContainsInstruction(
const HloInstruction* instruction) const {
for (const auto& fusion_instruction : fusion_instructions_) {
if (fusion_instruction->ContainsInstruction(instruction)) return true;
}
return false;
}
absl::InlinedVector<HloInstructionAdaptor, 2> HloFusionAdaptor::GetRoots()
const {
auto roots = fusion_instructions_.back()->GetRoots();
if (fusion_instructions_.size() == 1) {
return roots;
}
CHECK_EQ(fusion_instructions_.size(), 2);
auto producer_roots = fusion_instructions_[0]->GetRoots();
const HloInstruction& producer_fusion =
fusion_instructions_[0]->FusionInstruction();
const HloInstruction& consumer_fusion =
fusion_instructions_.back()->FusionInstruction();
for (auto& root : roots) {
if (root.opcode() != HloOpcode::kParameter) {
continue;
}
const HloInstruction* operand =
consumer_fusion.operand(root.instruction().parameter_number());
int64_t root_index = 0;
if (operand->opcode() == HloOpcode::kGetTupleElement) {
root_index = operand->tuple_index();
operand = operand->operand(0);
}
if (operand == &producer_fusion) {
root = producer_roots[root_index];
}
}
if (!producer_fusion.IsMultiOutputFusion()) {
return roots;
}
absl::flat_hash_set<int64_t> root_indices_with_outside_usage;
for (HloInstruction* instr : producer_fusion.users()) {
bool has_outside_user = false;
int64_t root_index = 0;
if (instr->opcode() == HloOpcode::kGetTupleElement) {
for (HloInstruction* user : instr->users()) {
if (user != &consumer_fusion) {
root_index = instr->tuple_index();
has_outside_user = true;
break;
}
}
} else if (instr != &consumer_fusion) {
has_outside_user = true;
}
if (has_outside_user) {
root_indices_with_outside_usage.insert(root_index);
}
}
for (int64_t i = 0; i < producer_roots.size(); ++i) {
if (!root_indices_with_outside_usage.contains(i)) {
continue;
}
if (producer_roots[i].opcode() != HloOpcode::kParameter) {
roots.push_back(producer_roots[i]);
}
}
return roots;
}
absl::InlinedVector<const HloInstruction*, 2> HloFusionAdaptor::GetParameters()
const {
if (fusion_instructions_.size() == 1) {
return fusion_instructions_.back()->GetParameters();
}
CHECK_EQ(fusion_instructions_.size(), 2);
absl::InlinedVector<const HloInstruction*, 2> combined_parameters;
const HloInstruction& producer_fusion =
fusion_instructions_[0]->FusionInstruction();
for (const auto& param : fusion_instructions_.back()->GetParameters()) {
const HloInstruction* operand = param;
if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->operand(0);
}
if (operand != &producer_fusion) {
combined_parameters.push_back(param);
}
}
absl::flat_hash_set<const HloInstruction*> params(combined_parameters.begin(),
combined_parameters.end());
auto producer_roots = fusion_instructions_[0]->GetRoots();
absl::flat_hash_set<const HloInstruction*> parameters_to_skip;
for (const auto& root : producer_roots) {
if (root.opcode() == HloOpcode::kParameter) {
if (&root.instruction() == &producer_fusion) {
parameters_to_skip.insert(&producer_fusion);
} else if (root.instruction().user_count() <= 1) {
parameters_to_skip.insert(
producer_fusion.operand(root.instruction().parameter_number()));
}
}
}
for (auto param : fusion_instructions_[0]->GetParameters()) {
if (!parameters_to_skip.contains(param) && params.insert(param).second) {
combined_parameters.push_back(param);
}
}
return combined_parameters;
}
absl::InlinedVector<HloInstructionAdaptor, 2>
HloFusionAdaptor::MakeInstructionPostOrder() const {
absl::InlinedVector<HloInstructionAdaptor, 2> result_post_order;
for (const auto& fusion_instruction : fusion_instructions_) {
absl::c_move(fusion_instruction->MakeInstructionPostOrder(),
std::back_inserter(result_post_order));
}
return result_post_order;
}
std::string HloFusionAdaptor::ToString() const {
std::ostringstream ss;
for (const auto& fusion_instruction : fusion_instructions_) {
ss << fusion_instruction->ToString() << "\n";
}
return ss.str();
}
void HloFusionAdaptor::AddInstruction(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
AddComputation(instruction->fused_instructions_computation());
} else {
fusion_instructions_.push_back(
std::make_unique<SingleInstructionFusion>(instruction, this));
}
}
void HloFusionAdaptor::AddComputation(const HloComputation* computation) {
fusion_instructions_.push_back(
std::make_unique<HloComputationFusion>(computation, this));
}
absl::InlinedVector<HloInstructionAdaptor, 2>
HloInstructionAdaptor::GetOperands() const {
absl::InlinedVector<HloInstructionAdaptor, 2> operands;
if (instruction_->opcode() == HloOpcode::kParameter) {
auto operand = ResolveOperand(instruction_, *parent_);
if (operand != instruction_) {
operands.emplace_back(*operand, parent_);
}
} else {
for (const auto* operand : instruction_->operands()) {
operands.emplace_back(*ResolveOperand(operand, *parent_), parent_);
}
}
return operands;
}
HloInstructionAdaptor HloInstructionAdaptor::GetOperand(int index) const {
return HloInstructionAdaptor{
*ResolveOperand(instruction_->operand(index), *parent_), parent_};
}
absl::InlinedVector<HloInstructionAdaptor, 2> HloInstructionAdaptor::GetUsers()
const {
absl::InlinedVector<HloInstructionAdaptor, 2> users;
auto add_user = [&](const HloInstruction* instr) {
users.emplace_back(*instr, parent_);
};
if (instruction_->IsRoot()) {
if (auto* fusion = instruction_->parent()->FusionInstruction()) {
for (auto* user : fusion->users()) {
ResolveUsers(fusion, user, *parent_, add_user);
}
}
}
for (auto* user : instruction_->users()) {
ResolveUsers(instruction_, user, *parent_, add_user);
}
return users;
}
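// Equality is defined on the owning module and the instruction's unique id,
// consistent with AbslHashValue in the header.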
bool operator==(const HloInstructionAdaptor& lhs,
const HloInstructionAdaptor& rhs) {
return lhs.instruction_->GetModule() == rhs.instruction_->GetModule() &&
lhs.instruction_->unique_id() == rhs.instruction_->unique_id();
}
namespace {
void HloBfsTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg,
bool visit_operands) {
absl::flat_hash_set<HloInstructionAdaptor> visited;
std::queue<HloInstructionAdaptor> q;
auto enqueue = [&](const HloInstructionAdaptor& node) {
const auto& adjacent_nodes =
visit_operands ? node.GetOperands() : node.GetUsers();
for (const auto& adjacent_node : adjacent_nodes) {
if (visited.insert(adjacent_node).second) {
if (fusion.ContainsInstruction(adjacent_node)) {
q.push(adjacent_node);
} else {
visit_arg(adjacent_node);
}
}
}
};
for (auto root : roots) {
if (visited.insert(root).second) {
q.push(root);
}
}
while (!q.empty()) {
HloInstructionAdaptor node = q.front();
q.pop();
switch (visit_node(node)) {
case TraversalResult::kAdvance:
enqueue(node);
break;
case TraversalResult::kInterrupt:
return;
case TraversalResult::kSkip:
break;
}
}
}
}
void HloBfsConsumersFirstTraversal(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node,
const std::function<void(HloInstructionAdaptor producer)>& visit_arg) {
HloBfsTraversal(roots, fusion, visit_node, visit_arg,
true);
}
void HloBfsProducersFirstTraversal(
absl::Span<const HloInstructionAdaptor> producers,
const HloFusionAdaptor& fusion,
const std::function<TraversalResult(HloInstructionAdaptor node)>&
visit_node) {
HloBfsTraversal(
producers, fusion, visit_node, [](HloInstructionAdaptor) {},
false);
}
bool HloAnyOf(absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands) {
return HloFindIf(roots, fusion, visit, visit_operands).has_value();
}
bool HloAnyOf(absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
return HloFindIf(roots, visit, visit_operands).has_value();
}
std::optional<HloInstructionAdaptor> HloFindIf(
absl::Span<const HloInstructionAdaptor> roots,
const HloFusionAdaptor& fusion,
const std::function<bool(HloInstructionAdaptor node)>& visit,
bool visit_operands) {
std::optional<HloInstructionAdaptor> result = std::nullopt;
HloBfsTraversal(
roots, fusion,
[&](HloInstructionAdaptor node) {
if (visit(node)) {
result = node;
return TraversalResult::kInterrupt;
}
return TraversalResult::kAdvance;
},
[](HloInstructionAdaptor) {}, visit_operands);
return result;
}
std::vector<const HloInstruction*> HloFindAllImpl(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands, bool find_first_only = false) {
std::vector<const HloInstruction*> result;
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> q;
auto enqueue = [&](const HloInstruction* node) {
if (visit_operands) {
for (const HloInstruction* operand : node->operands()) {
if (visited.insert(operand).second) {
q.push(operand);
}
}
} else {
for (const HloInstruction* user : node->users()) {
if (visited.insert(user).second) {
q.push(user);
}
}
}
}
};
for (auto root : roots) {
if (visited.insert(root).second) {
q.push(root);
}
}
while (!q.empty()) {
const HloInstruction* node = q.front();
q.pop();
if (visit(node)) {
result.push_back(node);
if (find_first_only) {
return result;
}
}
enqueue(node);
}
return result;
}
std::optional<const HloInstruction*> HloFindIf(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
auto result = HloFindAllImpl(roots, visit, visit_operands,
true);
if (result.empty()) {
return std::nullopt;
}
return result[0];
}
std::vector<const HloInstruction*> HloFindAll(
absl::Span<const HloInstruction* const> roots,
const std::function<bool(const HloInstruction* node)>& visit,
bool visit_operands) {
return HloFindAllImpl(roots, visit, visit_operands);
}
std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent,
HloInstructionAdaptor root) {
absl::flat_hash_set<HloInstructionAdaptor> visited;
std::vector<HloInstructionAdaptor> result;
std::function<bool(HloInstructionAdaptor)> visit;
visit = [&](HloInstructionAdaptor node) {
if (node == root) return true;
for (const auto& user : node.GetUsers()) {
if (visited.insert(user).second && visit(user)) {
result.push_back(user);
return true;
}
}
return false;
};
if (visit(parent)) {
result.push_back(parent);
std::reverse(result.begin(), result.end());
} else {
result.clear();
}
return result;
}
}
} | #include "xla/service/gpu/hlo_traversal.h"
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
MATCHER_P(InstructionAdaptorName, name, "") { return arg.name() == name; }
class HloTraversalTest : public HloTestBase {};
const char kTestModule[] = R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_1 {
p0.2 = f32[] parameter(0)
zero = f32[] constant(0.0)
is_positive = pred[] compare(p0.2, zero), direction=GE
not = pred[] not(is_positive)
ROOT tuple = (pred[], pred[]) tuple(is_positive, not)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
log = f32[128] log(sum)
negate = f32[128] negate(log)
fusion = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation
fusion2 = (pred[], pred[]) fusion(fusion), kind=kLoop, calls=fused_computation_1
gte = pred[] get-tuple-element(fusion2), index=0
ROOT select = f32[] select(gte, fusion, p0)
})";
TEST_F(HloTraversalTest, AdaptorOperands) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion2"),
module->entry_computation()->GetInstructionWithName("select"));
HloInstructionAdaptor instr = fusion_adaptor->GetRoots()[0];
EXPECT_THAT(instr.GetOperands(),
ElementsAre(InstructionAdaptorName("is_positive"),
InstructionAdaptorName("fusion"),
InstructionAdaptorName("p0")));
}
TEST_F(HloTraversalTest, AdaptorUsers) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation {
p0 = f32[] parameter(0)
neg = f32[] negate(p0)
add = f32[] add(p0, neg)
ROOT t = (f32[], f32[]) tuple(neg, add)
}
fused_computation_1 {
p0.0 = f32[] parameter(0)
mul = f32[] multiply(p0.0, p0.0)
ROOT neg.1 = f32[] negate(mul)
}
ENTRY entry {
p0 = f32[] parameter(0)
fusion = (f32[], f32[]) fusion(p0), kind=kLoop, calls=fused_computation
gte = f32[] get-tuple-element(fusion), index=0
add.1 = f32[] add(p0, gte)
fusion2 = f32[] fusion(gte), kind=kLoop, calls=fused_computation_1
exp.1 = f32[] exponential(fusion2)
ROOT res = (f32[], (f32[], f32[]), f32[], f32[]) tuple(add.1, fusion, fusion2, exp.1)
}
)")
.value();
auto fusion_adaptor1 = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion"),
module->entry_computation()->GetInstructionWithName("fusion2"));
HloInstructionAdaptor add{*module->GetComputationWithName("fused_computation")
->GetInstructionWithName("add"),
fusion_adaptor1.get()};
EXPECT_THAT(add.GetUsers(), ElementsAre(InstructionAdaptorName("add.1"),
InstructionAdaptorName("mul"),
InstructionAdaptorName("res")));
auto fusion_adaptor2 = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion2"));
HloInstructionAdaptor mul{
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("mul"),
fusion_adaptor2.get()};
EXPECT_THAT(mul.GetUsers(), ElementsAre(InstructionAdaptorName("neg.1")));
HloInstructionAdaptor neg{
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("neg.1"),
fusion_adaptor2.get()};
EXPECT_THAT(neg.GetUsers(), ElementsAre(InstructionAdaptorName("exp.1")));
}
TEST_F(HloTraversalTest, TraverseFusionConsumerFirst) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
std::vector<std::string> visited_args;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) {
visited_args.emplace_back(arg.name());
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
EXPECT_THAT(visited_args, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest,
TraverseFusionConsumerFirstFromFusionRootAndInnerNode) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
std::vector<std::string> visited_args;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto root = fusion->GetRoots()[0];
HloBfsConsumersFirstTraversal(
{root, root.GetOperand(0)}, *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) {
visited_args.emplace_back(arg.name());
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
EXPECT_THAT(visited_args, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest, TraverseFusionProducerFirst) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
std::vector<std::string> visited_nodes;
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto root = fusion->GetRoots()[0];
HloBfsProducersFirstTraversal({root.GetOperand(0)}, *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(visited_nodes, ElementsAre("mul", "reduce.1"));
}
TEST_F(HloTraversalTest, AbortTraversal) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> visited_nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
visited_nodes.emplace_back(node.name());
return node.opcode() == HloOpcode::kReduce
? TraversalResult::kAdvance
: TraversalResult::kInterrupt;
});
EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
}
TEST_F(HloTraversalTest, FindArguments) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> producers;
absl::c_for_each(fusion->GetParameters(),
[&](const HloInstruction* producer) {
producers.emplace_back(producer->name());
});
EXPECT_THAT(producers, ElementsAre("p0", "negate"));
}
TEST_F(HloTraversalTest, FindArgumentsAfterFusion) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("negate"),
module->entry_computation()->GetInstructionWithName("fusion"));
std::vector<std::string> producers;
absl::c_for_each(fusion->GetParameters(),
[&](const HloInstruction* producer) {
producers.emplace_back(producer->name());
});
EXPECT_THAT(producers, ElementsAre("p0", "log"));
}
TEST_F(HloTraversalTest, FindIf) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto result =
HloFindIf(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) {
return node.opcode() == HloOpcode::kMultiply;
});
ASSERT_NE(result, std::nullopt);
ASSERT_EQ(result->name(), "mul");
}
TEST_F(HloTraversalTest, NotFound) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
auto result = HloFindIf(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) { return false; });
ASSERT_EQ(result, std::nullopt);
}
TEST_F(HloTraversalTest, FindAllMultiple) {
const char kConverts[] = R"(
HloModule test
ENTRY entry {
p0 = s8[128] parameter(0)
p1 = pred[128] parameter(1)
p1c = s8[128] convert(p1)
p1c1 = f16[128] convert(p1c)
p0c = f16[128] convert(p0)
ROOT diff = f16[128] subtract(p0c, p1c1)
})";
auto module = ParseAndReturnVerifiedModule(kConverts).value();
auto root = module->entry_computation()->GetInstructionWithName("diff");
std::vector<const HloInstruction*> converts =
HloFindAll({root}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kConvert;
});
auto get = [&](absl::string_view name) {
return module->entry_computation()->GetInstructionWithName(name);
};
EXPECT_THAT(converts, ElementsAre(get("p0c"), get("p1c1"), get("p1c")));
}
TEST_F(HloTraversalTest, FindAllNotFound) {
const char kConverts[] = R"(
HloModule test
ENTRY entry {
p0 = s8[128] parameter(0)
p1 = f16[128] parameter(1)
p0c = f16[128] convert(p0)
ROOT diff = f16[128] subtract(p0c, p1)
})";
auto module = ParseAndReturnVerifiedModule(kConverts).value();
auto root = module->entry_computation()->GetInstructionWithName("diff");
std::vector<const HloInstruction*> converts =
HloFindAll({root}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kAdd;
});
EXPECT_THAT(converts, IsEmpty());
}
const char kTwoFusions[] = R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation_1 {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_2 {
p0.2 = f32[] parameter(0)
p1.2 = f32[128] parameter(1)
ROOT reduce.2 = f32[] reduce(p1.2, p0.2), dimensions={0}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
negate = f32[128] negate(sum)
fusion.1 = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[] fusion(fusion.1, negate), kind=kLoop, calls=fused_computation_2
ROOT difference = f32[] subtract(fusion.2, p0)
})";
TEST_F(HloTraversalTest, FuseFusionConsumer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer = module->entry_computation()->GetInstructionWithName("negate");
auto consumer =
module->entry_computation()->GetInstructionWithName("fusion.1");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
HloInstructionAdaptor reduce_1(
*module->GetComputationWithName("fused_computation_1")
->GetInstructionWithName("reduce.1"),
fusion.get());
EXPECT_THAT(reduce_1.GetUsers(),
ElementsAre(InstructionAdaptorName("fusion.2")));
std::vector<std::string> nodes;
std::vector<std::string> params;
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor param) { params.emplace_back(param.name()); });
EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul", "negate"));
EXPECT_THAT(params, ElementsAre("p0", "sum"));
}
TEST_F(HloTraversalTest, FuseFusionProducer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("fusion.2");
auto consumer =
module->entry_computation()->GetInstructionWithName("difference");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
HloInstructionAdaptor reduce_2(
*module->GetComputationWithName("fused_computation_2")
->GetInstructionWithName("reduce.2"),
fusion.get());
EXPECT_THAT(reduce_2.GetOperands(),
ElementsAre(InstructionAdaptorName("negate"),
InstructionAdaptorName("fusion.1")));
std::vector<std::string> nodes;
std::vector<std::string> params;
HloBfsConsumersFirstTraversal(
fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
},
[&](HloInstructionAdaptor arg) { params.emplace_back(arg.name()); });
EXPECT_THAT(nodes, ElementsAre("difference", "reduce.2"));
EXPECT_THAT(params, ElementsAre("p0", "negate", "fusion.1"));
}
TEST_F(HloTraversalTest, FuseFusionConsumerAndProducer) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("fusion.1");
auto consumer =
module->entry_computation()->GetInstructionWithName("fusion.2");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
std::vector<std::string> params;
absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* param) {
params.emplace_back(param->name());
});
EXPECT_THAT(nodes, ElementsAre("reduce.2", "reduce.1", "mul"));
EXPECT_THAT(params, ElementsAre("negate", "p0"));
}
TEST_F(HloTraversalTest, FuseNonFusionConsumerAndProducer) {
auto module = ParseAndReturnVerifiedModule(kTestModule).value();
auto producer = module->entry_computation()->GetInstructionWithName("log");
auto consumer = module->entry_computation()->GetInstructionWithName("negate");
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("negate", "log"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion.1"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfInstruction) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("negate"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("negate"));
}
TEST_F(HloTraversalTest, MultiOutputFusionDuplicateRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation {
p0.1 = f32[128] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p0.1, p1.1)
ROOT res = (f32[128], f32[128]) tuple(mul, mul)
}
ENTRY entry {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
ROOT fusion = (f32[128], f32[128]) fusion(p0, p1), kind=kLoop, calls=fused_computation
})")
.value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
EXPECT_THAT(fusion->GetRoots(), ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("mul")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_SingleInstruction) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("negate"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("negate")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoFusions) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion.1"),
module->entry_computation()->GetInstructionWithName("fusion.2"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("reduce.1"),
InstructionAdaptorName("reduce.2")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoMultiOutputFusions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation_1 {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
ROOT t = (f32[128], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p0.2 = f32[] parameter(0)
p1.2 = f32[128] parameter(1)
neg = f32[128] negate(p1.2)
reduce.2 = f32[] reduce(neg, p0.2), dimensions={0}, to_apply=scalar_add_computation
ROOT t2 = (f32[], f32[128]) tuple(reduce.2, neg)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
negate = f32[128] negate(sum)
fusion.1 = (f32[128], f32[]) fusion(p0, negate), kind=kLoop, calls=fused_computation_1
gte1 = f32[128] get-tuple-element(fusion.1), index=0
gte2 = f32[] get-tuple-element(fusion.1), index=1
fusion.2 = (f32[], f32[128]) fusion(p0, gte1), kind=kLoop, calls=fused_computation_2
gte3 = f32[] get-tuple-element(fusion.2), index=0
gte4 = f32[128] get-tuple-element(fusion.2), index=1
difference = f32[] subtract(gte3, p0)
ROOT res = (f32[], f32[128]) tuple(difference, gte4)
})")
.value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion.1"),
module->entry_computation()->GetInstructionWithName("fusion.2"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("reduce.1"),
InstructionAdaptorName("neg"),
InstructionAdaptorName("reduce.2")));
}
const char kTwoMultiOutputFusions[] = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
param2 = f32[10]{0} parameter(2)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0, param2)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
param2.0 = f32[10]{0} parameter(2)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
p2 = f32[10]{0} parameter(2)
producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1, p2), kind=kLoop, calls=mof_producer
gte0 = f32[10]{0} get-tuple-element(producer), index=0
gte1 = f32[10]{0} get-tuple-element(producer), index=1
gte2 = f32[10]{0} get-tuple-element(producer), index=2
gte3 = f32[10]{0} get-tuple-element(producer), index=3
gte4 = f32[10]{0} get-tuple-element(producer), index=4
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer
gte5 = f32[10]{0} get-tuple-element(consumer), index=0
gte6 = f32[10]{0} get-tuple-element(consumer), index=1
gte7 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = tuple(gte0, gte1, gte3, gte4, gte5, gte6, gte7)
})";
TEST_F(HloTraversalTest, GetParametersMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("producer");
auto consumer =
module->entry_computation()->GetInstructionWithName("consumer");
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(producer, consumer);
auto p0 = module->entry_computation()->GetInstructionWithName("p0");
auto p1 = module->entry_computation()->GetInstructionWithName("p1");
EXPECT_THAT(fusion_adaptor->GetParameters(), ElementsAre(p0, p1));
consumer->MergeFusionInstructionIntoMultiOutput(producer);
EXPECT_THAT(consumer->operands(), ElementsAre(p0, p1));
}
TEST_F(HloTraversalTest, GetRootsMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto consumer_fusion_instr =
module->entry_computation()->GetInstructionWithName("consumer");
auto producer_fusion_instr =
module->entry_computation()->GetInstructionWithName("producer");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
producer_fusion_instr, consumer_fusion_instr);
auto producer_computation = module->GetComputationWithName("mof_producer");
auto producer = HloFusionAdaptor::ForComputation(producer_computation);
auto consumer_computation = module->GetComputationWithName("mof_consumer");
auto consumer = HloFusionAdaptor::ForComputation(consumer_computation);
EXPECT_THAT(fusion_adaptor->GetRoots(),
ElementsAre(
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("mul"),
consumer.get()},
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("div"),
consumer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("param0"),
producer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("add"),
producer.get()}));
consumer_fusion_instr->MergeFusionInstructionIntoMultiOutput(
producer_fusion_instr);
EXPECT_THAT(consumer_fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloTraversalTest, HloFindUseChain) {
auto module = ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
negate = f32[] negate(p0)
log = f32[] log(p0)
sum = f32[] add(p0, log)
exp = f32[] exponential(p1)
ROOT call = f32[] custom-call(negate, exp, sum), custom_call_target="it"
}
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT fusion = f32[] fusion(p0, p1), kind=kLoop, calls=fusion
}
)")
.value();
auto* fusion_computation = module->GetComputationWithName("fusion");
auto fusion = HloFusionAdaptor::ForComputation(fusion_computation);
auto get = [&](absl::string_view name) {
return HloInstructionAdaptor{
*fusion_computation->GetInstructionWithName(name), fusion.get()};
};
auto p0 = get("p0");
auto p1 = get("p1");
auto log = get("log");
auto sum = get("sum");
auto negate = get("negate");
auto exp = get("exp");
auto call = get("call");
EXPECT_THAT(HloFindUseChain(p0, p0), ElementsAre(p0));
EXPECT_THAT(HloFindUseChain(p0, p1), IsEmpty());
EXPECT_THAT(HloFindUseChain(p0, call), ElementsAre(p0, negate, call));
EXPECT_THAT(HloFindUseChain(p0, sum), ElementsAre(p0, log, sum));
EXPECT_THAT(HloFindUseChain(p1, exp), ElementsAre(p1, exp));
EXPECT_THAT(HloFindUseChain(negate, exp), IsEmpty());
EXPECT_THAT(HloFindUseChain(call, p0), IsEmpty());
}
}
}
} | 2,043 |
#ifndef XLA_SERVICE_GPU_GEMM_FUSION_H_
#define XLA_SERVICE_GPU_GEMM_FUSION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
bool ShouldTritonHandleGEMM(HloDotInstruction&,
const se::GpuComputeCapability&);
class GemmFusion : public HloModulePass {
public:
explicit GemmFusion(const se::GpuComputeCapability& gpu_version)
: gpu_version_(gpu_version) {}
absl::string_view name() const override { return "triton-gemm-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::GpuComputeCapability gpu_version_;
};
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_GEMM_FUSION_H_
#include "xla/service/gpu/gemm_fusion.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::CombineDotRequirements;
using triton_fusion::DimensionOrder;
using triton_fusion::DimOrderMap;
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotProperties;
using triton_fusion::DotRequirements;
using triton_fusion::DotRequirementsOrError;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible;
using triton_fusion::TransformDirection;
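// Minimal directed graph with densely numbered nodes; the first node added
// is the root (see GetRoot).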
class AdjacencyList {
public:
using NodeId = int64_t;
NodeId AddNode() {
adj_.emplace_back();
return adj_.size() - 1;
}
const std::vector<NodeId>& GetOutNeighbors(NodeId node_id) const {
return adj_.at(node_id);
}
void ReserveSpaceForOutNeighbors(NodeId node_id, size_t count) {
adj_.at(node_id).reserve(count);
}
void AddArc(NodeId from, NodeId to) { adj_.at(from).push_back(to); }
NodeId GetRoot() const {
CHECK(!adj_.empty());
return 0;
}
private:
std::vector<std::vector<NodeId>> adj_;
};
struct HloAndDimOrder {
const HloInstruction* original_hlo = nullptr;
DimensionOrder dim_order;
};
struct HloAndIterSpec {
const HloInstruction* original_hlo;
TensorIterationSpec iter_spec;
auto ToTuple() const { return std::make_tuple(original_hlo, iter_spec); }
bool operator==(const HloAndIterSpec& other) const {
return ToTuple() == other.ToTuple();
}
template <typename H>
friend H AbslHashValue(H h, const HloAndIterSpec& key) {
return H::combine(std::move(h), key.ToTuple());
}
};
struct NodeFusionPlan {
const HloInstruction* original_hlo = nullptr;
bool should_fuse = false;
};
struct FusionPlan {
AdjacencyList graph;
absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> map;
};
struct FusionPlanAndRequirements {
FusionPlan fusion_plan;
DotRequirements requirements;
};
struct HlosAndRequirements {
const HloInstruction* original_hlo = nullptr;
const HloInstruction* fused_hlo = nullptr;
DotRequirements requirements;
};
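// Clones `dot` into `builder`, replacing its operands with the already fused
// lhs/rhs (plus the optional third "meta" operand, when present), and
// returns the clone.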
HloInstruction& FuseDot(const HloDotInstruction& dot,
const HloInstruction& fused_lhs,
const HloInstruction& fused_rhs,
std::optional<const HloInstruction*> fused_meta,
HloComputation::Builder& builder
) {
VLOG(3) << "Fusing " << dot.ToString();
std::vector<HloInstruction*> hlo_new_operands = {
const_cast<HloInstruction*>(&fused_lhs),
const_cast<HloInstruction*>(&fused_rhs)};
if (fused_meta.has_value()) {
hlo_new_operands.push_back(const_cast<HloInstruction*>(fused_meta.value()));
}
return *builder.AddInstruction(
dot.CloneWithNewOperands(dot.shape(), hlo_new_operands));
}
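// Net number of fusion parameters gained by fusing `hlo`: parameters and
// non-scalar constants add none (each remains a single input), while any
// other instruction adds its operands as inputs and removes its own output
// from the inputs, i.e. operand_count() - 1.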
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kParameter ||
(hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape()))) {
return 0;
}
return hlo.operand_count() - 1;
}
std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqs(
const HloInstruction& hlo, const DimensionOrder& dim_order,
const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirements(
hlo, dim_order, TransformDirection::kOutputToInput, properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqsIfProfitable(
const HloInstruction& hlo, const DimensionOrder& dim_order,
const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
hlo, TransformDirection::kOutputToInput,
std::nullopt, dim_order, gpu_version,
properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
std::optional<DimOrdersAndReqs> GetUserDimOrdersAndCombinedReqsIfProfitable(
const HloInstruction& hlo, const DimensionOrder& hlo_dim_order,
const HloInstruction& user, const DotProperties& properties,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements) {
DimOrdersAndReqsOrError dim_orders_and_new_reqs =
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
user, TransformDirection::kInputToOutput, user.operand_index(&hlo),
hlo_dim_order, gpu_version, properties);
if (!std::holds_alternative<DimOrdersAndReqs>(dim_orders_and_new_reqs)) {
return std::nullopt;
}
DotRequirementsOrError combined_reqs = CombineDotRequirements(
requirements,
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements);
if (!std::holds_alternative<DotRequirements>(combined_reqs)) {
return std::nullopt;
}
return DimOrdersAndReqs{
std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders,
std::get<DotRequirements>(combined_reqs)};
}
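// Builds a fusion plan by BFS from `root_hlo` toward operands. Each visited
// node is either fused (its operands get enqueued) or recorded as a fusion
// input. A node whose fusion would exceed `max_params` is requeued; once
// every node still in the queue has been requeued without progress, the
// remaining nodes are marked as unfused inputs.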
FusionPlanAndRequirements BuildFusionPlanTowardOperands(
const HloInstruction& root_hlo, const DimensionOrder& root_dim_order,
const std::optional<int>& max_params,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties,
const DotRequirements& requirements_so_far) {
CHECK(!max_params.has_value() || max_params.value() >= 1);
AdjacencyList graph;
absl::flat_hash_map<AdjacencyList::NodeId, HloAndDimOrder>
hlo_and_dim_order_map;
absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> fusion_plan_map;
absl::flat_hash_map<HloAndIterSpec, AdjacencyList::NodeId> node_reuse_map;
DotRequirements combined_reqs = requirements_so_far;
auto get_or_create_fusion_node =
[&](const HloInstruction& hlo, const DimensionOrder& dim_order,
bool* is_new_node = nullptr) -> AdjacencyList::NodeId {
HloAndIterSpec reuse_key = {&hlo, dim_order.ToTensorIterationSpec()};
if (auto it = node_reuse_map.find(reuse_key); it != node_reuse_map.end()) {
if (is_new_node != nullptr) {
*is_new_node = false;
}
return it->second;
}
AdjacencyList::NodeId node_id = graph.AddNode();
CHECK(hlo_and_dim_order_map.insert({node_id, {&hlo, dim_order}}).second);
CHECK(node_reuse_map.insert({reuse_key, node_id}).second);
if (is_new_node != nullptr) {
*is_new_node = true;
}
return node_id;
};
AdjacencyList::NodeId root =
get_or_create_fusion_node(root_hlo, root_dim_order);
absl::flat_hash_set<AdjacencyList::NodeId> inputs({root});
std::queue<AdjacencyList::NodeId> queue({root});
int64_t num_requeued = 0;
while (queue.size() > num_requeued) {
AdjacencyList::NodeId node_id = queue.front();
queue.pop();
const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id);
const HloInstruction& original_hlo = *hlo_and_dim_order.original_hlo;
const DimensionOrder& dim_order = hlo_and_dim_order.dim_order;
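    // If fusing this op would exceed the parameter budget, requeue it: other
    // queued nodes may still be fusible, and once only requeued nodes remain
    // the loop exits and they all become fusion parameters.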
if (max_params.has_value() &&
inputs.size() + NumAddedParameters(original_hlo) > max_params.value()) {
queue.push(node_id);
++num_requeued;
continue;
}
num_requeued = 0;
if (original_hlo.opcode() == HloOpcode::kParameter) {
      CHECK(fusion_plan_map
                .insert({node_id, {&original_hlo, /*should_fuse=*/false}})
                .second);
continue;
}
auto opt_result = GetOperandDimOrdersAndCombinedReqsIfProfitable(
original_hlo, dim_order, properties, gpu_version, combined_reqs);
if (!opt_result.has_value()) {
      CHECK(fusion_plan_map
                .insert({node_id, {&original_hlo, /*should_fuse=*/false}})
                .second);
continue;
}
const DimOrderMap operand_dim_orders = std::move(opt_result->dim_orders);
combined_reqs = std::move(opt_result->requirements);
inputs.erase(node_id);
graph.ReserveSpaceForOutNeighbors(node_id, original_hlo.operand_count());
for (int64_t i = 0; i < original_hlo.operand_count(); ++i) {
const HloInstruction& operand = *original_hlo.operand(i);
const DimensionOrder& operand_dim_order = operand_dim_orders.at(&operand);
bool is_new_node = false;
AdjacencyList::NodeId operand_node_id =
get_or_create_fusion_node(operand, operand_dim_order, &is_new_node);
graph.AddArc(node_id, operand_node_id);
if (is_new_node) {
VLOG(6) << "Enqueueing " << operand.ToString() << ":"
<< operand_dim_order.ToString();
inputs.insert(operand_node_id);
queue.push(operand_node_id);
}
}
    CHECK(fusion_plan_map
              .insert({node_id, {&original_hlo, /*should_fuse=*/true}})
              .second);
}
while (!queue.empty()) {
AdjacencyList::NodeId node_id = queue.front();
queue.pop();
const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id);
    CHECK(fusion_plan_map
              .insert({node_id,
                       {hlo_and_dim_order.original_hlo, /*should_fuse=*/false}})
              .second);
}
return {{std::move(graph), std::move(fusion_plan_map)},
std::move(combined_reqs)};
}
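// Recursively materializes the fusion plan into `builder`, memoizing already
// emitted nodes in `fused_hlo_map`; nodes marked as not-to-fuse become new
// fusion parameters appended to `fusion_params`.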
HloInstruction& BuildFusionTowardOperandsImpl(
AdjacencyList::NodeId node_id, const FusionPlan& fusion_plan,
absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*>&
fused_hlo_map,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
if (auto it = fused_hlo_map.find(node_id); it != fused_hlo_map.end()) {
return *it->second;
}
const NodeFusionPlan& node_fusion_plan = fusion_plan.map.at(node_id);
const bool should_fuse = node_fusion_plan.should_fuse;
const HloInstruction& original_hlo = *node_fusion_plan.original_hlo;
HloInstruction* fused_hlo = nullptr;
if (should_fuse) {
HloInstruction::InstructionVector new_operands;
for (AdjacencyList::NodeId operand_id :
fusion_plan.graph.GetOutNeighbors(node_id)) {
new_operands.push_back(&BuildFusionTowardOperandsImpl(
operand_id, fusion_plan, fused_hlo_map, builder, fusion_params));
}
fused_hlo = builder.AddInstruction(
original_hlo.CloneWithNewOperands(original_hlo.shape(), new_operands));
} else {
fusion_params.push_back(const_cast<HloInstruction*>(&original_hlo));
fused_hlo = builder.AddInstruction(HloInstruction::CreateParameter(
fusion_params.size() - 1, original_hlo.shape(),
absl::StrCat("parameter_", fusion_params.size() - 1)));
}
CHECK(fused_hlo_map.insert({node_id, fused_hlo}).second);
return *fused_hlo;
}
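// Materializes the whole fusion plan starting from its root node.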
HloInstruction& BuildFusionTowardOperands(
const FusionPlan& fusion_plan,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*> fused_hlo_map;
return BuildFusionTowardOperandsImpl(fusion_plan.graph.GetRoot(), fusion_plan,
fused_hlo_map, builder, fusion_params);
}
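// Plans and immediately builds the operand-side fusion for `root_hlo`,
// returning the original root, its fused counterpart (or parameter), and the
// combined dot requirements.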
HlosAndRequirements FuseTowardOperands(
const HloInstruction& root_hlo, const DimensionOrder& root_dim_order,
const std::optional<int>& max_params,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties, const DotRequirements& requirements_so_far,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
FusionPlanAndRequirements fusion_plan_and_reqs =
BuildFusionPlanTowardOperands(root_hlo, root_dim_order, max_params,
gpu_version, properties,
requirements_so_far);
HloInstruction& fused_hlo_or_param = BuildFusionTowardOperands(
fusion_plan_and_reqs.fusion_plan, builder, fusion_params);
return HlosAndRequirements{&root_hlo, &fused_hlo_or_param,
fusion_plan_and_reqs.requirements};
}
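// Fuses the graph feeding the `operand_index`-th operand of `dot`, capped at
// kMaxParameterPerDotOperand fusion parameters.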
absl::StatusOr<HlosAndRequirements> FuseDotOperand(
const HloInstruction& dot, int operand_index,
const se::GpuComputeCapability& gpu_version,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
TF_ASSIGN_OR_RETURN(const FusionContext context,
FusionContext::FromDotOperand(dot, operand_index));
const HloInstruction& operand = *dot.operand(operand_index);
return FuseTowardOperands(operand, context.dim_orders().at(&operand),
TritonFusionAnalysis::kMaxParameterPerDotOperand,
gpu_version, context.dot_properties(),
context.requirements(), builder, fusion_params);
}
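// Greedily grows the fusion toward users: `hlo` is fused into its user only
// if it has exactly one user, that user distributes over addition, and the
// propagation is profitable; the user's other operands are then fused toward
// their own operands before recursing further.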
HlosAndRequirements FuseTowardUsers(
const HloInstruction& hlo, const HloInstruction& fused_hlo,
const DimensionOrder& hlo_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties, const DotRequirements& requirements,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
const HlosAndRequirements existing_hlos_and_requirements = {&hlo, &fused_hlo,
requirements};
if (hlo.user_count() != 1) {
return existing_hlos_and_requirements;
}
const HloInstruction& user = *hlo.users()[0];
if (!legacy_triton::IsDistributiveOverAddition(user)) {
return existing_hlos_and_requirements;
}
auto opt_user_result = GetUserDimOrdersAndCombinedReqsIfProfitable(
hlo, hlo_dim_order, user, properties, gpu_version, requirements);
if (!opt_user_result.has_value()) {
return existing_hlos_and_requirements;
}
DimensionOrder user_dim_order = opt_user_result->dim_orders.at(&user);
DotRequirements combined_requirements = opt_user_result->requirements;
HloInstruction::InstructionVector new_operands;
if (user.operand_count() == 1) {
new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo));
} else {
auto opt_operand_result = GetOperandDimOrdersAndCombinedReqs(
user, user_dim_order, properties, gpu_version, combined_requirements);
if (!opt_operand_result.has_value()) {
return existing_hlos_and_requirements;
}
DimOrderMap operand_dim_orders = opt_operand_result->dim_orders;
combined_requirements = opt_operand_result->requirements;
for (int i = 0; i < user.operand_count(); ++i) {
const HloInstruction& operand = *user.operand(i);
if (&operand == &hlo) {
new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo));
} else {
        HlosAndRequirements hlos_and_requirements = FuseTowardOperands(
            operand, operand_dim_orders.at(&operand),
            /*max_params=*/std::nullopt, gpu_version, properties,
            combined_requirements, builder, fusion_params);
new_operands.push_back(
const_cast<HloInstruction*>(hlos_and_requirements.fused_hlo));
combined_requirements = hlos_and_requirements.requirements;
}
}
}
const HloInstruction& fused_user = *builder.AddInstruction(
user.CloneWithNewOperands(user.shape(), new_operands));
return FuseTowardUsers(user, fused_user, user_dim_order, gpu_version,
properties, combined_requirements, builder,
fusion_params);
}
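// Grows the fusion from the dot's output toward its users.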
HlosAndRequirements FuseDotOutput(
const HloInstruction& dot, const HloInstruction& fused_dot,
const se::GpuComputeCapability& gpu_version,
const DotRequirements& requirements,
HloComputation::Builder& builder,
    std::vector<HloInstruction*>& fusion_params) {
  const auto context =
      FusionContext::FromDotOutput(dot, /*split_k=*/1, requirements);
return FuseTowardUsers(dot, fused_dot, context.dim_orders().at(&dot),
gpu_version, context.dot_properties(),
context.requirements(), builder, fusion_params);
}
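// Builds a complete GEMM fusion around `dot` into `builder`: both (or, for
// sparse dots, all three) operand-side subgraphs, the dot itself, and the
// output-side subgraph. Declines the fusion when it would be a plain matmul
// with nothing else fused and neither flags, sparsity, nor the dot's
// algorithm force it.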
absl::StatusOr<FusionDecision> CreateDotFusion(
const HloDotInstruction& dot, const se::GpuComputeCapability gpu_version,
HloComputation::Builder& builder,
std::vector<HloInstruction*>& fusion_inputs,
HloInstruction** fusion_output_ptr) {
VLOG(5) << dot.ToString();
if (CodegenDecision is_supported =
legacy_triton::IsTritonSupportedInstruction(dot, gpu_version);
!is_supported) {
VLOG(3) << is_supported.Explain();
return is_supported;
}
if (dot.sparse_operands()) {
const SparsityDescriptor& descriptor = dot.sparsity().front();
if (dot.sparse_operands() != 1 || descriptor.index() != 0) {
return InvalidArgument("Sparsity is only supported on left operand");
}
if (descriptor.type() != SparsityType::SPARSITY_STRUCTURED_N_M ||
descriptor.n() != 2 || descriptor.m() != 4) {
return InvalidArgument("Only 2:4 structured sparsity is supported");
}
CHECK_EQ(descriptor.dimension(), dot.operand(0)->shape().rank() - 1);
}
  TF_ASSIGN_OR_RETURN(HlosAndRequirements lhs_hlos_and_reqs,
                      FuseDotOperand(dot, /*operand_index=*/0, gpu_version,
                                     builder, fusion_inputs));
  TF_ASSIGN_OR_RETURN(HlosAndRequirements rhs_hlos_and_reqs,
                      FuseDotOperand(dot, /*operand_index=*/1, gpu_version,
                                     builder, fusion_inputs));
std::optional<const HloInstruction*> meta_hlo;
if (dot.sparse_operands()) {
    TF_ASSIGN_OR_RETURN(HlosAndRequirements meta_hlos_and_reqs,
                        FuseDotOperand(dot, /*operand_index=*/2, gpu_version,
                                       builder, fusion_inputs));
meta_hlo.emplace(meta_hlos_and_reqs.fused_hlo);
}
HloInstruction& fused_dot =
FuseDot(dot, *lhs_hlos_and_reqs.fused_hlo, *rhs_hlos_and_reqs.fused_hlo,
meta_hlo, builder);
HlosAndRequirements fused_output_and_reqs =
FuseDotOutput(dot, fused_dot, gpu_version, lhs_hlos_and_reqs.requirements,
builder, fusion_inputs);
if (fusion_output_ptr != nullptr) {
*fusion_output_ptr =
const_cast<HloInstruction*>(fused_output_and_reqs.original_hlo);
}
const PrecisionConfig::Algorithm algorithm =
dot.precision_config().algorithm();
if (algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 ||
algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 ||
dot.GetModule()->config().debug_options().xla_gpu_triton_gemm_any() ||
dot.sparse_operands()) {
return FusionDecision{};
}
bool is_pure_matmul = true;
(void)builder.ForEachInstruction([&](const HloInstruction* fused_hlo) {
static constexpr std::array<HloOpcode, 4> kPureOpcodes = {
HloOpcode::kBitcast, HloOpcode::kDot, HloOpcode::kParameter,
HloOpcode::kReshape};
if (absl::c_find(kPureOpcodes, fused_hlo->opcode()) == kPureOpcodes.end()) {
is_pure_matmul = false;
return absl::CancelledError();
}
return absl::OkStatus();
});
if (!is_pure_matmul) {
return FusionDecision{};
}
return "No profitable operations to fuse.";
}
class GemmFusionVisitor : public DfsHloRewriteVisitor {
public:
explicit GemmFusionVisitor(const se::GpuComputeCapability& gpu_version)
: gpu_version_(gpu_version) {}
absl::Status HandleDot(HloInstruction* dot) override {
CHECK_EQ(dot->opcode(), HloOpcode::kDot);
int64_t gemm_rewrite_size_threshold =
dot->GetModule()
->config()
.debug_options()
.xla_gpu_gemm_rewrite_size_threshold();
TF_ASSIGN_OR_RETURN(bool is_matmul_tiny,
IsMatrixMultiplicationTooSmallForRewriting(
*dot, gemm_rewrite_size_threshold));
if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*dot)) {
return absl::OkStatus();
}
std::string fusion_name = absl::StrCat("gemm_fusion_", dot->name());
HloComputation::Builder builder(absl::StrCat(fusion_name, "_computation"));
std::vector<HloInstruction*> fusion_inputs;
HloInstruction* fusion_output = nullptr;
TF_ASSIGN_OR_RETURN(
const FusionDecision should_fuse,
CreateDotFusion(*Cast<HloDotInstruction>(dot), gpu_version_, builder,
fusion_inputs, &fusion_output));
if (builder.last_added_instruction() == nullptr) {
return absl::OkStatus();
}
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) {
if (!CublasRequiresPadding(
*Cast<HloDotInstruction>(dot),
std::get<se::CudaComputeCapability>(gpu_version_)) &&
!should_fuse) {
return absl::OkStatus();
}
}
HloComputation* computation =
dot->GetModule()->AddComputationAndUnifyNamesAndIds(builder.B | #include "xla/service/gpu/gemm_fusion.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
namespace m = ::xla::match;
class GemmFusionTest : public HloTestBase {
public:
GemmFusionTest()
      : HloTestBase(/*verifier_layout_sensitive=*/true,
                    /*allow_mixed_precision_in_hlo_verifier=*/false) {}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
return debug_options;
}
se::GpuComputeCapability gpu_version_{
se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}};
void MatchHloModule(HloModule& module, absl::string_view pattern) {
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result,
RunFileCheck(module.ToString(), pattern));
EXPECT_TRUE(filecheck_result);
}
};
TEST_F(GemmFusionTest, TransposeSubdimensionGroup) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = f32[32,3] parameter(0)
t1 = f32[3,32] transpose(p0), dimensions={1,0}
r1 = f32[3,8,4] reshape(t1)
r0 = f32[3,32] reshape(r1)
p1 = f16[32,7] parameter(1)
c1 = f32[32,7] convert(p1)
ROOT d = f32[3,7] dot(r0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, UnsupportedTransposeIsNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f16[1,512,8,1024]{3,1,0,2} parameter(0)
c = f16[1,512,8,1024]{3,2,1,0} copy(p0)
b = f16[4096,1024]{1,0} bitcast(c)
p1 = f16[128,1024]{1,0} parameter(1)
ROOT d = f16[4096,128]{1,0} dot(b, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})")
.value();
EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionTest, BitcastChain) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = s8[60,5] parameter(0)
r0 = s8[3,20,5] reshape(p0)
c0 = f16[3,20,5] convert(r0)
p1 = f16[3,200] parameter(1)
r12 = f16[600] reshape(p1)
r11 = f16[30,20] reshape(r12)
r1 = f16[3,10,20] reshape(r11)
ROOT d = f16[3,5,10] dot(c0, r1),
lhs_contracting_dims={1}, rhs_contracting_dims={2},
lhs_batch_dims={0}, rhs_batch_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, SplitDimensionTwice) {
auto module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = s8[4,2,32,4,2] parameter(0)
r1 = s8[8,32,8] reshape(p0)
t1 = s8[32,8,8] transpose(r1), dimensions={1,0,2}
r0 = s8[32,64] reshape(t1)
p1 = s8[32,32] parameter(1)
c0 = f16[32,32] convert(p1)
ROOT d = f16[64,32] dot(r0, c0),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, DoNotTriggerOnUnsupportedOutputConversions) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f16[128,256] parameter(0)
p1 = f16[256,512] parameter(1)
r = f16[128,512] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT c = u8[128,512] convert(r)
})"));
EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionTest, FuseDotWithTrivialNoncontractingDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = s8[60,5] parameter(0)
r0 = s8[3,20,5] reshape(p0)
c0 = f16[3,20,5] convert(r0)
p1 = f16[3,1,20] parameter(1)
ROOT d = f16[3,5,1] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={2},
lhs_batch_dims={0}, rhs_batch_dims={0}
})")
.value();
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, HandleDotIfCublasRequiresPadding) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
ENTRY e {
p0 = f16[5,3] parameter(0)
p1 = f16[5,7] parameter(1)
ROOT d = f16[3,7] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(CublasRequiresPadding(
*xla::Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
cc));
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, FuseSliceOfParameterWithOtherUsers) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[97,121] parameter(0)
s0 = f32[7,101] slice(p0), slice={[3:10], [10:111]}
p1 = f32[101,16] parameter(1)
d = f32[16,7] dot(p1, s0),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
s1 = f32[3,33] slice(p0), slice={[10:13], [20:53]}
ROOT t = tuple(d, s1)
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DoNotFuseSliceOfMixedDimensions) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = bf16[768,64] parameter(0)
s0 = bf16[768,32] slice(p0), slice={[0:768], [0:32]}
b0 = bf16[256,3,32] reshape(s0)
b1 = bf16[256,96] reshape(b0)
p1 = bf16[256,96] parameter(1)
ROOT d = bf16[96,96] dot(b1, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DoNotFuseSlicesOfNonMajorFragments) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[2,2,256,256] parameter(0)
s0 = f32[1,1,256,256] slice(p0),
slice={[0:1], [0:1], [0:256], [0:256]}
r0 = f32[256,256] reshape(s0)
p1 = f16[2,2,256,256] parameter(1)
s1 = f16[1,1,256,256] slice(p1),
slice={[0:1], [0:1], [0:256], [0:256]}
r1 = f16[256,256] reshape(s1)
ROOT d = f32[256,256] dot(r0, r1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, DynamicSliceIsFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,18] parameter(0)
dynamic_slice_input = f32[2,64,2] parameter(1)
start_index0 = s32[] parameter(2)
start_index1_2 = s32[] constant(0)
dynamic_slice = f32[1,64,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1_2, start_index1_2),
dynamic_slice_sizes={1,64,2}
reshape = f32[64,2] reshape(dynamic_slice)
ROOT dot = f16[18,64] dot(dot_lhs, reshape),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(),
m::Parameter(), m::Constant()))));
}
TEST_F(GemmFusionTest, DynamicSlicesAreFusedEvenIfTheyShareIndices) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[2,64,2] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ds0 = f32[1,64,2] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,64,2}
a = f32[64,2] reshape(ds0)
ds1 = f32[1,64,2] dynamic-slice(p0, p3, p2, p1), dynamic_slice_sizes={1,64,2}
b = f32[64,2] reshape(ds1)
ROOT d = f16[64,64] dot(a, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(),
m::Parameter(), m::Parameter(), m::Parameter(),
m::Parameter(), m::Parameter()))));
}
TEST_F(GemmFusionTest, DoNotFuseDynamicSliceOfNonMajorFragments) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,4]{1,0} parameter(0)
dynamic_slice_input = f32[4,5,2]{2,1,0} parameter(1)
c0 = s32[] constant(0)
c2 = s32[] constant(2)
dynamic_slice = f32[4,1,2]{2,1,0} dynamic-slice(dynamic_slice_input, c0, c2, c0),
dynamic_slice_sizes={4,1,2}
reshape = f32[4,2]{1,0} reshape(dynamic_slice)
ROOT dot = f32[4,4]{1,0} dot(dot_lhs, reshape),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
TEST_F(GemmFusionTest, CanFuseDynamicSliceOfContractingDimIfItIsMajor) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
dot_lhs = f32[2,4]{1,0} parameter(0)
dynamic_slice_input = f32[5,5]{1,0} parameter(1)
start_index0 = s32[] constant(2)
start_index1 = s32[] constant(0)
dynamic_slice = f32[2,5]{1,0} dynamic-slice(dynamic_slice_input, start_index0, start_index1),
dynamic_slice_sizes={2,5}
ROOT d = f32[4,5]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch((m::Fusion(m::Parameter(), m::Parameter(),
m::Constant(), m::Constant()))));
}
TEST_F(GemmFusionTest, SliceToDegenerateIsSkipped) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p = f32[3] parameter(0)
s = f32[1] slice(p), slice={[2:3]}
r = f32[] reshape(s)
b = f32[3,3] broadcast(r), dimensions={}
ROOT d = f32[3,3] dot(b, b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
ASSERT_TRUE(GemmFusion(cc).Run(module.get()).value());
MatchHloModule(*module, R"(
; CHECK-NOT: slice
; CHECK: ENTRY
; CHECK: slice
)");
}
TEST_F(GemmFusionTest, MultipleUsesAreHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
c = f32[] constant(1)
b = f32[6,8] broadcast(c), dimensions={}
p0 = f32[6,8] parameter(0)
a1 = f32[6,8] add(p0, b)
e = f32[6,8] exponential(a1)
a2 = f32[6,8] add(e, b)
d = f32[6,8] divide(b, a2)
p2 = f16[8,6] parameter(1)
cv = f32[8,6] convert(p2)
ROOT r = f32[6,6] dot(d, cv),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, BinaryElementwiseOfBroadcastIsFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p2 = f32[3072] parameter(2)
b = f32[8192,3072] broadcast(p2), dimensions={1}
p0 = f16[8192,3072] parameter(0)
p0c = f32[8192,3072] convert(p0)
a = f32[8192,3072] add(p0c, b)
p1 = f32[3072,768] parameter(1)
ROOT r = f32[8192,768] dot(a, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionTest, BinaryElementwiseOfUnsupportedBroadcastIsNotFused) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p2 = f32[768] parameter(2)
b = f32[8192,768,4] broadcast(p2), dimensions={1}
s = f32[8192,3072] bitcast(b)
p0 = f16[8192,3072] parameter(0)
p0c = f32[8192,3072] convert(p0)
a = f32[8192,3072] add(p0c, s)
p1 = f32[3072,768] parameter(1)
ROOT r = f32[8192,768] dot(a, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})"));
const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0};
EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value());
}
class GemmFusionLevel2Test : public GemmFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_fusion_level(2);
return debug_options;
}
};
TEST_F(GemmFusionLevel2Test, ReshapeToScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = s8[5,3] parameter(0)
c = f16[5,3] convert(p0)
p1 = f16[1] parameter(1)
r = f16[] reshape(p1)
b = f16[5,7] broadcast(r)
ROOT d = f16[3,7] dot(c, b),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionLevel2Test, DoNotFuseIncompatibleDimensionSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
p1 = s8[5,7,2,3]{3,2,1,0} parameter(1)
t1 = s8[7,5,2,3]{3,2,1,0} transpose(p1), dimensions={1,0,2,3}
r1 = s8[7,30]{1,0} reshape(t1)
cvt = f16[7,30]{1,0} convert(r1)
p2 = f16[2,7,5,3]{3,2,1,0} parameter(2)
t2 = f16[7,2,5,3]{3,2,1,0} transpose(p2), dimensions={1,0,2,3}
r2 = f16[7,30]{1,0} reshape(t2)
a = f16[7,30]{1,0} add(cvt, r2)
p0 = f16[7,79]{1,0} parameter(0)
ROOT dot = f16[30,79]{1,0} dot(a, p0),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Transpose(), m::Parameter(), m::Parameter())));
}
TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParameters) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
tmp_0 = f32[] constant(1)
tmp_1 = f32[3,49]{1,0} broadcast(tmp_0), dimensions={}
tmp_2 = f32[3,49]{1,0} parameter(6)
tmp_3 = f32[] constant(0)
tmp_4 = f32[3,49]{1,0} broadcast(tmp_3), dimensions={}
tmp_5 = pred[3,49]{1,0} compare(tmp_2, tmp_4), direction=GT
tmp_6 = f32[3,49]{1,0} convert(tmp_5)
tmp_7 = f32[3,49]{1,0} subtract(tmp_1, tmp_6)
tmp_8 = s32[] parameter(13)
tmp_9 = f32[] convert(tmp_8)
tmp_10 = f32[] maximum(tmp_9, tmp_0)
tmp_11 = f32[] divide(tmp_3, tmp_10)
tmp_12 = f32[3,49]{1,0} broadcast(tmp_11), dimensions={}
tmp_13 = pred[3,49]{1,0} parameter(7)
tmp_14 = pred[3,49]{1,0} parameter(10)
tmp_15 = pred[3,49]{1,0} and(tmp_13, tmp_14)
tmp_16 = f32[3,49]{1,0} convert(tmp_15)
tmp_17 = f32[3,49]{1,0} multiply(tmp_12, tmp_16)
tmp_18 = f32[3,49]{1,0} negate(tmp_17)
tmp_19 = f32[3,49]{1,0} multiply(tmp_7, tmp_18)
tmp_20 = f32[3,49]{1,0} parameter(19)
tmp_21 = f32[3,49]{1,0} subtract(tmp_1, tmp_20)
tmp_22 = f32[3,49]{1,0} divide(tmp_19, tmp_21)
tmp_23 = f32[3,49]{1,0} negate(tmp_22)
tmp_24 = f32[3,49]{1,0} negate(tmp_6)
tmp_25 = f32[3,49]{1,0} multiply(tmp_24, tmp_17)
tmp_26 = f32[3,49]{1,0} divide(tmp_25, tmp_20)
tmp_27 = f32[3,49]{1,0} add(tmp_23, tmp_26)
tmp_28 = f32[3,49]{1,0} parameter(18)
tmp_29 = f32[3,49]{1,0} multiply(tmp_27, tmp_28)
tmp_30 = f32[3,49]{1,0} parameter(17)
tmp_31 = f32[3,49]{1,0} multiply(tmp_29, tmp_30)
tmp_32 = f32[3,49]{1,0} parameter(16)
tmp_33 = f32[3,49]{1,0} multiply(tmp_31, tmp_32)
tmp_34 = f32[3,49]{1,0} parameter(15)
tmp_35 = f32[3,49]{1,0} add(tmp_33, tmp_34)
tmp_36 = f32[3,49]{1,0} parameter(14)
tmp_37 = f32[3,49]{1,0} add(tmp_35, tmp_36)
tmp_38 = f32[1,1]{1,0} constant({ {0} })
tmp_39 = f32[1,1]{1,0} broadcast(tmp_38), dimensions={0,1}
tmp_40 = f32[] reshape(tmp_39)
tmp_41 = f32[3,32]{1,0} broadcast(tmp_40), dimensions={}
tmp_42 = u32[48]{0} parameter(11)
tmp_43 = u32[48]{0} parameter(5)
tmp_44 = u32[96]{0} concatenate(tmp_42, tmp_43), dimensions={0}
tmp_45 = u32[3,32]{1,0} reshape(tmp_44)
tmp_46 = u32[96]{0} reshape(tmp_45)
tmp_47 = u32[] constant(1)
tmp_48 = u32[3,32]{1,0} broadcast(tmp_47), dimensions={}
tmp_49 = u32[96]{0} reshape(tmp_48)
tmp_50 = u32[96]{0} shift-right-logical(tmp_46, tmp_49)
tmp_51 = u32[3,32]{1,0} reshape(tmp_50)
tmp_52 = u32[3,32]{1,0} or(tmp_51, tmp_48)
tmp_53 = f32[3,32]{1,0} bitcast-convert(tmp_52)
tmp_54 = f32[3,32]{1,0} broadcast(tmp_0), dimensions={}
tmp_55 = f32[3,32]{1,0} subtract(tmp_53, tmp_54)
tmp_56 = f32[1,1]{1,0} constant({ {1} })
tmp_57 = f32[1,1]{1,0} broadcast(tmp_56), dimensions={0,1}
tmp_58 = f32[] reshape(tmp_57)
tmp_59 = f32[3,32]{1,0} broadcast(tmp_58), dimensions={}
tmp_60 = f32[3,32]{1,0} multiply(tmp_55, tmp_59)
tmp_61 = f32[3,32]{1,0} add(tmp_60, tmp_41)
tmp_62 = f32[3,32]{1,0} maximum(tmp_41, tmp_61)
tmp_63 = f32[3,32]{1,0} broadcast(tmp_3), dimensions={}
tmp_64 = pred[3,32]{1,0} compare(tmp_62, tmp_63), direction=LT
tmp_65 = f32[3,32]{1,0} convert(tmp_64)
tmp_66 = f32[3,49]{1,0} parameter(9)
tmp_67 = f32[49]{0} parameter(4)
tmp_68 = f32[3,49]{1,0} broadcast(tmp_67), dimensions={1}
tmp_69 = f32[3,49]{1,0} add(tmp_66, tmp_68)
tmp_70 = f32[1,49]{1,0} parameter(12)
tmp_71 = f32[1,49]{1,0} broadcast(tmp_0), dimensions={}
tmp_72 = f32[1,49]{1,0} divide(tmp_70, tmp_71)
tmp_73 = f32[1,49]{1,0} broadcast(tmp_72), dimensions={0,1}
tmp_74 = f32[49]{0} reshape(tmp_73)
tmp_75 = f32[3,49]{1,0} broadcast(tmp_74), dimensions={1}
tmp_76 = f32[3,49]{1,0} subtract(tmp_69, tmp_75)
tmp_77 = f32[1,49]{1,0} parameter(3)
tmp_78 = f32[1,49]{1,0} parameter(8)
tmp_79 = f32[1,49]{1,0} divide(tmp_78, tmp_71)
tmp_80 = f32[1,49]{1,0} multiply(tmp_72, tmp_72)
tmp_81 = f32[1,49]{1,0} subtract(tmp_79, tmp_80)
tmp_82 = f32[1,49]{1,0} add(tmp_81, tmp_71)
tmp_83 = f32[1,49]{1,0} rsqrt(tmp_82)
tmp_84 = f32[1,49]{1,0} multiply(tmp_77, tmp_83)
tmp_85 = f32[1,49]{1,0} broadcast(tmp_84), dimensions={0,1}
tmp_86 = f32[49]{0} reshape(tmp_85)
tmp_87 = f32[3,49]{1,0} broadcast(tmp_86), dimensions={1}
tmp_88 = f32[3,49]{1,0} multiply(tmp_76, tmp_87)
tmp_89 = f32[1,49]{1,0} parameter(2)
tmp_90 = f32[1,49]{1,0} broadcast(tmp_89), dimensions={0,1}
tmp_91 = f32[49]{0} reshape(tmp_90)
tmp_92 = f32[3,49]{1,0} broadcast(tmp_91), dimensions={1}
tmp_93 = f32[3,49]{1,0} add(tmp_88, tmp_92)
tmp_94 = f32[49,32]{1,0} parameter(1)
tmp_95 = f32[3,32]{1,0} dot(tmp_93, tmp_94), lhs_contracting_dims={1}, rhs_contracting_dims={0}
tmp_96 = f32[32]{0} parameter(0)
tmp_97 = f32[3,32]{1,0} broadcast(tmp_96), dimensions={1}
tmp_98 = f32[3,32]{1,0} add(tmp_95, tmp_97)
tmp_99 = f32[3,32]{1,0} multiply(tmp_65, tmp_98)
tmp_100 = f32[3,32]{1,0} divide(tmp_99, tmp_63)
tmp_101 = f32[3,32]{1,0} maximum(tmp_100, tmp_63)
ROOT tmp_102 = f32[49,32]{1,0} dot(tmp_37, tmp_101), lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand * 2);
}
TEST_F(GemmFusionLevel2Test,
DoNotFuseTooManyParametersWhenAnInstructionWouldAddMultipleParameters) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[3,49]{1,0} parameter(0)
b = f32[3,49]{1,0} parameter(1)
c = pred[3,49]{1,0} parameter(2)
d = f32[3,49]{1,0} parameter(3)
e = f32[3,49]{1,0} parameter(4)
add0 = f32[3,49]{1,0} add(a, b)
select = f32[3,49]{1,0} select(c, d, e)
add1 = f32[3,49]{1,0} add(add0, select)
f = f32[3,32]{1,0} parameter(5)
ROOT tmp_102 = f32[49,32]{1,0} dot(add1, f), lhs_contracting_dims={0}, rhs_contracting_dims={0}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand + 1);
}
TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersForConcat) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
b = f32[3,3]{1,0} parameter(1)
c = f32[3,3]{1,0} parameter(2)
d = f32[3,3]{1,0} parameter(3)
e = f32[3,3]{1,0} parameter(4)
f = f16[3,3]{1,0} parameter(5)
concat = f32[15,3]{1,0} concatenate(a, b, c, d, e), dimensions={0}
convert = f32[3,3]{1,0} convert(f)
ROOT dot = f32[15,3]{1,0} dot(concat, convert), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kFusion);
EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(),
HloInstruction::FusionKind::kCustom);
EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(),
TritonFusionAnalysis::kMaxParameterPerDotOperand + 1);
}
TEST_F(GemmFusionLevel2Test,
InstructionsReachableFromMultipleOperandsAreHandledCorrectly) {
static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4,
"We have to update this test.");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
b = f32[2,4]{1,0} parameter(1)
c = f32[2,4]{1,0} parameter(2)
d = f32[2,4]{1,0} parameter(3)
e = f32[2,4]{1,0} parameter(4)
add0 = f32[2,4]{1,0} add(a, b)
add1 = f32[2,4]{1,0} add(add0, c)
add2 = f32[2,4]{1,0} add(add1, d)
add3 = f32[2,4]{1,0} add(add2, e)
ROOT r = f32[2,2]{1,0} dot(add3, add0),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
}
TEST_F(GemmFusionLevel2Test, EachScopeIsFusedToASeparateSubgraph) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
b = f32[2,4]{1,0} parameter(1)
add = f32[2,4]{1,0} add(a, b)
ROOT r = f32[2,2]{1,0} dot(add, add),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]])
CHECK-DAG: %[[P2:.*]] = f32[2,4]{1,0} parameter(2)
CHECK-DAG: %[[P3:.*]] = f32[2,4]{1,0} parameter(3)
CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P2]], f32[2,4]{1,0} %[[P3]])
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0}
CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]),
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, ParamNodesAreReusedIfTheyHaveTheSameIterSpec) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[2,4]{1,0} parameter(0)
add = f32[2,4]{1,0} add(a, a)
ROOT r = f32[2,2]{1,0} dot(add, add),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]])
CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1)
CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P1]])
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0)
CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0}
CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]])
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, NonParamNodesAreReusedIfTheyHaveTheSameIterSpec) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[4,4]{1,0} parameter(0)
b = f32[4,4]{1,0} parameter(1)
negate = f32[4,4]{1,0} negate(a)
sine = f32[4,4]{1,0} sine(negate)
add = f32[4,4]{1,0} add(negate, sine)
ROOT r = f32[4,4]{1,0} dot(add, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: %[[NEGATE:.*]] = f32[4,4]{1,0} negate(f32[4,4]{1,0} %[[P0]])
CHECK-DAG: %[[SINE:.*]] = f32[4,4]{1,0} sine(f32[4,4]{1,0} %[[NEGATE]])
CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} add(f32[4,4]{1,0} %[[NEGATE]], f32[4,4]{1,0} %[[SINE]])
CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P1]])
CHECK: ENTRY
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0}
CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]])
CHECK-SAME: kind=kCustom
CHECK-SAME: __triton_gemm
})");
}
TEST_F(GemmFusionLevel2Test, NodesAreNotReusedIfTheyHaveDifferentIterSpecs) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
ENTRY e {
a = f32[4,4]{1,0} parameter(0)
b = f32[4,4]{1,0} parameter(1)
tr_a = f32[4,4]{1,0} transpose(a), dimensions={1,0}
add = f32[4,4]{1,0} add(a, tr_a)
ROOT r = f32[4,4]{1,0} dot(add, b),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
})"));
EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value());
MatchHloModule(*module, R"(
CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0)
CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1)
CHECK-DAG: %[[P2:.*]] = f32[4,4]{1,0} parameter(2)
CHECK-DAG: %[[TRANSPOSE:.*]] = f32[4,4]{1, | 2,044 |
#ifndef XLA_SERVICE_GPU_DOUBLE_BUFFER_LOOP_UNROLLING_H_
#define XLA_SERVICE_GPU_DOUBLE_BUFFER_LOOP_UNROLLING_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
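// Unrolls while loops that have a known trip count, either by a factor of two
// ("double buffering", peeling one iteration when the trip count is odd) or
// completely. This exposes more opportunities to overlap collectives with
// computation across adjacent iterations.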
class DoubleBufferLoopUnrolling : public HloModulePass {
public:
enum class UnrollStrategy { kDoubleBuffer, kFullUnroll };
explicit DoubleBufferLoopUnrolling(
UnrollStrategy unroll_strategy = UnrollStrategy::kDoubleBuffer)
      : unroll_strategy_(unroll_strategy) {}
~DoubleBufferLoopUnrolling() override = default;
absl::string_view name() const override {
return "loop-double-buffer-transformer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
UnrollStrategy unroll_strategy_;
};
}
}
#endif
#include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
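// Assigns a fresh channel id to a cloned collective so it does not clash with
// the original. For async collectives wrapped in an HloAsyncInstruction, the
// old->new map keeps the start/done pair on the same new channel id.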
void SetChannelIdForNewCollective(HloInstruction* new_instr,
const HloModule* module) {
absl::flat_hash_map<int64_t, int64_t> old_to_new_channel_id_map;
absl::flat_hash_map<int64_t, HloComputation*> channel_id_comp_map;
if (new_instr->IsAsynchronous() && hlo_query::IsCollectiveCommunicationOp(
new_instr->async_wrapped_opcode())) {
HloInstruction* wrapped_instr =
DynCast<HloAsyncInstruction>(new_instr)->async_wrapped_instruction();
int64_t old_channel_id = *wrapped_instr->channel_id();
int64_t new_channel_id = old_to_new_channel_id_map[old_channel_id];
if (old_to_new_channel_id_map.find(old_channel_id) ==
old_to_new_channel_id_map.end()) {
new_channel_id = hlo_query::NextChannelId(*module);
VLOG(2) << "Generated new channel id " << new_channel_id;
old_to_new_channel_id_map[old_channel_id] = new_channel_id;
}
VLOG(2) << "Setting channel id to " << new_channel_id;
wrapped_instr->set_channel_id(new_channel_id);
if (channel_id_comp_map.find(new_channel_id) == channel_id_comp_map.end()) {
channel_id_comp_map[new_channel_id] =
new_instr->async_wrapped_computation();
} else {
channel_id_comp_map[new_channel_id]->AddAsyncStart(new_instr);
}
} else if (hlo_query::IsCollectiveCommunicationOp(new_instr->opcode()) ||
hlo_query::IsAsyncCollectiveStartOp(new_instr)) {
new_instr->set_channel_id(hlo_query::NextChannelId(*module));
}
}
using Interval = std::pair<int64_t, int64_t>;
absl::StatusOr<std::vector<Interval>> ParseVectorOfPairs(
absl::string_view str) {
TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> replica_groups,
ParseReplicaGroupsOnly(str));
std::vector<Interval> res;
res.reserve(replica_groups.size());
for (const ReplicaGroup& replica_group : replica_groups) {
TF_RET_CHECK(replica_group.replica_ids_size() == 2);
int64_t a = replica_group.replica_ids(0);
int64_t b = replica_group.replica_ids(1);
res.emplace_back(a, b);
}
return res;
}
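// Rewrites kSendRecvValidationAttr for a peeled send/recv-style instruction:
// the peeled copy only corresponds to original iteration 0, so intervals that
// contained 0 become {0,0} and all others become the empty interval {1,0}.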
absl::Status SetSendRecvValidationForPeeledInstr(HloInstruction* new_instr,
HloInstruction* old_instr) {
TF_RET_CHECK(
new_instr->opcode() == old_instr->opcode() &&
"cloned instruction and original instruction have different opcodes");
if (!HloPredicateIsOp<HloOpcode::kCollectivePermute,
HloOpcode::kCollectivePermuteStart, HloOpcode::kSend,
HloOpcode::kRecv>(old_instr)) {
return absl::OkStatus();
}
const auto& attribute_map = new_instr->frontend_attributes().map();
if (!attribute_map.contains(kSendRecvValidationAttr)) {
return absl::OkStatus();
}
VLOG(3) << "Original send-recv iterations: "
<< attribute_map.at(kSendRecvValidationAttr);
TF_ASSIGN_OR_RETURN(
auto send_recv_validation_attr,
ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr)));
uint64_t n_pairs = send_recv_validation_attr.size();
if (n_pairs == 0) {
return absl::OkStatus();
}
std::vector<Interval> send_recv_validation_attr_updated(n_pairs, {1, 0});
for (std::uint64_t i = 0; i < send_recv_validation_attr.size(); i++) {
if (send_recv_validation_attr[i].first <= 0 &&
send_recv_validation_attr[i].second >= 0) {
send_recv_validation_attr_updated[i] = {0, 0};
}
}
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
new_instr, kSendRecvValidationAttr,
send_recv_validation_attr_updated);
return absl::OkStatus();
}
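// After the loop body is doubled, `cp1` (the original instruction) and `cp2`
// (its clone) each execute every other iteration of the original loop, so the
// kSendRecvValidationAttr intervals are halved and split between them;
// `is_peeled` accounts for the iteration peeled out of the loop.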
absl::Status SetSendRecvValidation(HloInstruction* cp1, HloInstruction* cp2,
bool is_peeled) {
TF_RET_CHECK(
cp2->opcode() == cp1->opcode() &&
"cloned instruction and original instruction have different opcodes");
if (!HloPredicateIsOp<HloOpcode::kCollectivePermute,
HloOpcode::kCollectivePermuteStart, HloOpcode::kSend,
HloOpcode::kRecv>(cp1)) {
return absl::OkStatus();
}
const auto& attribute_map = cp2->frontend_attributes().map();
if (!attribute_map.contains(kSendRecvValidationAttr)) {
return absl::OkStatus();
}
VLOG(3) << "Original send-recv iterations: "
<< attribute_map.at(kSendRecvValidationAttr);
TF_ASSIGN_OR_RETURN(
auto send_recv_validation_attr,
ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr)));
if (send_recv_validation_attr.size() == 0) {
return absl::OkStatus();
}
std::vector<Interval> send_recv_iterations_new_instr1,
send_recv_iterations_new_instr2;
send_recv_iterations_new_instr1.reserve(send_recv_validation_attr.size());
send_recv_iterations_new_instr2.reserve(send_recv_validation_attr.size());
for (const Interval& pair : send_recv_validation_attr) {
int64_t a = pair.first;
int64_t b = pair.second;
if (is_peeled) {
send_recv_iterations_new_instr1.emplace_back(
std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0)));
send_recv_iterations_new_instr2.emplace_back(
std::max(0.0, std::floor((a - 1) / 2.0)),
std::max(0.0, std::floor((b - 2) / 2.0)));
} else {
send_recv_iterations_new_instr1.emplace_back(std::floor((a + 1) / 2.0),
std::floor(b / 2.0));
send_recv_iterations_new_instr2.emplace_back(
std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0)));
}
}
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
cp1, kSendRecvValidationAttr,
send_recv_iterations_new_instr1);
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
cp2, kSendRecvValidationAttr,
send_recv_iterations_new_instr2);
VLOG(3) << "Updated send-recv iterations for " << cp1->name() << " : "
<< cp1->frontend_attributes().map().at(kSendRecvValidationAttr);
VLOG(3) << "Updated send-recv iterations for " << cp2->name() << " : "
<< cp2->frontend_attributes().map().at(kSendRecvValidationAttr);
return absl::OkStatus();
}
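// Re-creates control dependencies on the cloned instructions and, in
// addition, adds control edges from the old loop roots to clones of
// instructions that consume the input parameter's users, so the cloned
// iteration cannot start before the original one finishes (collectives and
// elementwise ops with a constant operand are exempt).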
absl::Status HandleControlDependencies(
const HloComputation* while_body,
const absl::flat_hash_map<HloInstruction*, HloInstruction*>& old_to_new_map,
HloInstruction::InstructionVector* old_loop_roots,
HloInstruction* input_parameter,
const absl::flat_hash_set<HloInstruction*>& skip_control_dep_injection) {
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
HloInstruction* new_instr = old_to_new_map.at(old_instr);
VLOG(2) << "Processing control predecessors for "
<< new_instr->ToString();
std::vector<HloInstruction*> new_control_pred;
new_control_pred.reserve(old_instr->control_predecessors().size());
for (HloInstruction* pred : old_instr->control_predecessors()) {
if (!old_to_new_map.contains(pred)) {
continue;
}
new_control_pred.push_back(old_to_new_map.at(pred));
}
TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps());
for (HloInstruction* new_pred : new_control_pred) {
TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr));
VLOG(2) << "Adding " << new_pred->ToString()
<< " to control dependency of " << new_instr->ToString();
}
}
}
for (HloInstruction* input_consumer : input_parameter->users()) {
for (HloInstruction* old_input : input_consumer->users()) {
if (old_to_new_map.find(old_input) != old_to_new_map.end()) {
HloInstruction* new_input = old_to_new_map.at(old_input);
if (skip_control_dep_injection.find(old_input) ==
skip_control_dep_injection.end() &&
!IsCollective(old_input)) {
for (HloInstruction* old_root : *old_loop_roots) {
TF_RETURN_IF_ERROR(old_root->AddControlDependencyTo(new_input));
}
}
}
}
}
return absl::OkStatus();
}
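// Fully unrolls the loop body in place: the body is cloned trip_count - 1
// times, each clone consuming the previous root as its input parameter, and
// the known trip count is finally set to 1.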
absl::StatusOr<bool> FullyUnroll(HloInstruction* while_instr,
HloModule* module) {
HloComputation* while_body = while_instr->while_body();
bool changed = false;
VLOG(2) << "Processing root " << while_body->root_instruction()->ToString();
auto loop_roots = while_body->root_instruction()->mutable_operands();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
VLOG(2) << "Processing input parameter " << input_parameter->ToString();
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
absl::flat_hash_set<HloInstruction*> skip_control_dep_injection;
std::string clone_suffix = "full_unroll_clone";
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
while_instr->backend_config<WhileLoopBackendConfig>());
std::vector<HloInstruction*> ops_to_clone;
ops_to_clone.reserve(while_body->MakeInstructionPostOrder().size());
HloInstruction* old_input_parameter = input_parameter;
HloInstruction* new_input_parameter = while_body->root_instruction();
absl::flat_hash_set<HloInstruction*> seen_ops;
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (seen_ops.contains(old_instr)) {
continue;
}
ops_to_clone.push_back(old_instr);
seen_ops.insert(old_instr);
}
int n = config.known_trip_count().n();
while (--n) {
std::vector<HloInstruction*> new_ops_to_clone;
old_to_new_map[old_input_parameter] = new_input_parameter;
for (HloInstruction* old_instr : ops_to_clone) {
if (old_to_new_map.contains(old_instr)) {
continue;
}
VLOG(2) << "Cloning instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* old_operand : old_instr->mutable_operands()) {
new_operands.push_back(old_to_new_map[old_operand]);
}
HloInstruction* new_instr =
while_body->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, clone_suffix));
if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) {
skip_control_dep_injection.insert(old_instr);
}
SetChannelIdForNewCollective(new_instr, module);
old_to_new_map[old_instr] = new_instr;
new_ops_to_clone.push_back(new_instr);
VLOG(2) << "Added instruction " << new_instr->ToString();
}
while_body->set_root_instruction(
old_to_new_map[while_body->root_instruction()]);
VLOG(2) << "Replaced with new root "
<< while_body->root_instruction()->ToString();
TF_RETURN_IF_ERROR(HandleControlDependencies(
while_body, old_to_new_map, &loop_roots, old_input_parameter,
skip_control_dep_injection));
old_to_new_map.clear();
skip_control_dep_injection.clear();
loop_roots = while_body->root_instruction()->mutable_operands();
old_input_parameter = new_input_parameter;
new_input_parameter = while_body->root_instruction();
ops_to_clone = std::move(new_ops_to_clone);
changed = true;
}
WhileLoopBackendConfig new_config;
new_config.mutable_known_trip_count()->set_n(1);
TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config));
return changed;
}
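// Clones one loop iteration into the parent computation, feeding it the
// original while operand and wiring its result in as the new while operand,
// which leaves the remaining trip count even.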
absl::Status PeelInstructionsForOddTripCount(HloModule* module,
HloInstruction* while_instr) {
std::string suffix = "peeled_double_buffer";
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
HloComputation* while_body = while_instr->while_body();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
HloInstruction* input_tuple = while_instr->mutable_operand(0);
auto old_loop_roots = while_body->root_instruction()->mutable_operands();
HloComputation* parent_comp = while_instr->parent();
old_to_new_map[input_parameter] = input_tuple;
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
continue;
}
VLOG(2) << "Peeling instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands(old_instr->operand_count());
for (int64_t i = 0; i < old_instr->operand_count(); i++) {
new_operands[i] = old_to_new_map[old_instr->mutable_operand(i)];
}
HloInstruction* new_instr =
parent_comp->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, suffix));
SetChannelIdForNewCollective(new_instr, module);
TF_CHECK_OK(SetSendRecvValidationForPeeledInstr(new_instr, old_instr));
old_to_new_map[old_instr] = new_instr;
VLOG(2) << "Added instruction " << new_instr->ToString()
<< " to parent computation.";
}
std::vector<HloInstruction*> new_roots;
for (HloInstruction* instr : old_loop_roots) {
new_roots.push_back(old_to_new_map[instr]);
}
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(
0, old_to_new_map[while_body->root_instruction()]));
VLOG(2) << "Replaced with new input tuple "
<< while_instr->operand(0)->ToString();
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
HloInstruction* new_instr = old_to_new_map[old_instr];
VLOG(2) << "Processing control predecessors for peeled instruction "
<< new_instr->ToString();
      std::vector<HloInstruction*> new_control_pred;
      new_control_pred.reserve(old_instr->control_predecessors().size());
for (HloInstruction* pred : old_instr->control_predecessors()) {
new_control_pred.push_back(old_to_new_map[pred]);
}
TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps());
for (HloInstruction* new_pred : new_control_pred) {
TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr));
VLOG(2) << "Adding " << new_pred->ToString()
<< " to control dependency of peeled instruction: "
<< new_instr->ToString();
}
}
}
return absl::OkStatus();
}
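// Doubles the loop body (peeling one iteration first if the trip count is
// odd) and halves the known trip count.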
absl::StatusOr<bool> DoubleBufferingUnroll(HloInstruction* while_instr,
HloModule* module) {
TF_ASSIGN_OR_RETURN(auto config,
while_instr->backend_config<WhileLoopBackendConfig>());
CHECK(config.has_known_trip_count())
<< "Only loops with known trip count are supported.";
int64_t exact_trip_count = config.known_trip_count().n();
VLOG(2) << "Processing while loop " << while_instr->ToString()
<< " with trip count: " << exact_trip_count;
HloComputation* while_body = while_instr->while_body();
VLOG(2) << "Processing root " << while_body->root_instruction()->ToString();
auto old_loop_roots = while_body->root_instruction()->mutable_operands();
HloInstruction* input_parameter = while_body->parameter_instruction(0);
VLOG(2) << "Processing input parameter " << input_parameter->ToString();
absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map;
absl::flat_hash_set<HloInstruction*> skip_control_dep_injection;
bool is_peeled = exact_trip_count % 2;
if (is_peeled) {
VLOG(2) << "Found loops with odd trip count, 1 iteration will be peeled "
"outside of the main body.";
TF_RETURN_IF_ERROR(PeelInstructionsForOddTripCount(module, while_instr));
exact_trip_count -= 1;
}
std::string suffix = "double_buffer_clone";
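  // Chain the cloned iteration onto the original one: the clone's "input
  // parameter" is the old root of the loop body.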
old_to_new_map[input_parameter] = while_body->root_instruction();
for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) {
if (old_to_new_map.find(old_instr) != old_to_new_map.end()) {
continue;
}
VLOG(2) << "Cloning instruction " << old_instr->ToString();
std::vector<HloInstruction*> new_operands;
for (HloInstruction* old_operand : old_instr->mutable_operands()) {
new_operands.push_back(old_to_new_map[old_operand]);
}
HloInstruction* new_instr =
while_body->AddInstruction(old_instr->CloneWithNewOperands(
old_instr->shape(), new_operands, suffix));
if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) {
skip_control_dep_injection.insert(old_instr);
}
SetChannelIdForNewCollective(new_instr, module);
TF_CHECK_OK(SetSendRecvValidation(old_instr, new_instr, is_peeled));
old_to_new_map[old_instr] = new_instr;
VLOG(2) << "Added instruction " << new_instr->ToString();
}
while_body->set_root_instruction(
old_to_new_map[while_body->root_instruction()]);
VLOG(2) << "Replaced with new root "
<< while_body->root_instruction()->ToString();
TF_RETURN_IF_ERROR(HandleControlDependencies(while_body, old_to_new_map,
&old_loop_roots, input_parameter,
skip_control_dep_injection));
WhileLoopBackendConfig new_config;
new_config.mutable_known_trip_count()->set_n(exact_trip_count / 2);
TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config));
return true;
}
}
absl::StatusOr<bool> DoubleBufferLoopUnrolling::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto comp : module->MakeNonfusionComputations()) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
VLOG(2) << "Processing " << while_instrs.size() << " while loops.";
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
while_instr->backend_config<WhileLoopBackendConfig>());
if (!config.has_known_trip_count()) {
VLOG(2) << while_instr->ToString()
<< " doesn't have exact trip count, skipping loop unrolling "
"for now";
continue;
}
if (unroll_strategy_ == UnrollStrategy::kFullUnroll) {
TF_ASSIGN_OR_RETURN(changed, FullyUnroll(while_instr, module));
} else if (unroll_strategy_ == UnrollStrategy::kDoubleBuffer) {
TF_ASSIGN_OR_RETURN(changed, DoubleBufferingUnroll(while_instr, module));
} else {
LOG(FATAL) << absl::StrCat("Unhandled unrolling strategy: ",
unroll_strategy_);
}
}
VLOG(2) << "LoopDoubleBufferTransformer output: " << module->ToString();
if (changed) {
TF_RETURN_IF_ERROR(
FlattenCallGraph().Run(module, execution_threads).status());
}
return changed;
}
}
} | #include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using tsl::testing::IsOkAndHolds;
int64_t CountInstructions(const HloComputation& computation, HloOpcode opcode) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == opcode) {
count++;
}
}
return count;
}
int64_t CountInstructions(const HloModule& module, HloOpcode opcode) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountInstructions((*computation), opcode);
}
return count;
}
class GpuLoopDoubleBufferTransformerTest : public HloTestBase {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_while_loop_double_buffering(true);
return debug_options;
}
};
TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollOddTripCountTest) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
11);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 11);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollEvenTripCountTest) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
  HloInstruction* while_instruction = nullptr;
for (auto instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
while_instruction = instr;
}
}
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
10);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 10);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopEvenTripCount) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
2);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 2);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopOddTripCount) {
const char* const kModuleString = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=3
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=2
cond = s32[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = s32[] constant(0)
tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllGatherStart),
2);
EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 3);
EXPECT_EQ(while_instruction->operand(0)->operand(2)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
UnrolledLoopNoControlDepsForConstantAdd) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
c2 = f32[] constant(2)
add = f32[] add(c2, param_0)
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(add, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(
CountInstructions((*while_instruction->while_body()), HloOpcode::kAdd),
4);
EXPECT_EQ(while_instruction->while_body()
->root_instruction()
->operand(0)
->control_predecessors()
.size(),
0);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
UnrolledLoopNoControlDepsForCollective) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}"
one = s32[] constant(1)
all-reduce-done = f32[] all-reduce-done(all-reduce-start)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 5);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllReduceStart),
2);
absl::flat_hash_set<int64_t> channel_ids;
hlo_query::ForEachInstructionWithOpcode(
*while_instruction->while_body(), HloOpcode::kAllReduceStart,
[&channel_ids](HloInstruction* ar) {
EXPECT_EQ(ar->control_predecessors().size(), 0);
channel_ids.insert(*(ar->channel_id()));
});
EXPECT_EQ(channel_ids.size(), 2);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
FullyUnrolledLoopNoControlDepsForCollective) {
const char* const kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}"
one = s32[] constant(1)
all-reduce-done = f32[] all-reduce-done(all-reduce-start)
cond_plus_1 = s32[] add(cond, one)
ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1)
}
ENTRY main {
param_0 = f32[] parameter(0)
param_2 = s32[] constant(0)
tuple = (f32[], s32[]) tuple(param_0, param_2)
ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
TupleSimplifier tuple_simp;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true));
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
while_instruction->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
EXPECT_EQ(CountInstructions((*while_instruction->while_body()),
HloOpcode::kAllReduceStart),
10);
absl::flat_hash_set<int64_t> channel_ids;
hlo_query::ForEachInstructionWithOpcode(
*while_instruction->while_body(), HloOpcode::kAllReduceStart,
[&channel_ids](HloInstruction* ar) {
EXPECT_EQ(ar->control_predecessors().size(), 0);
channel_ids.insert(*(ar->channel_id()));
});
EXPECT_EQ(channel_ids.size(), 10);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopRemainsFlattened) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
EXPECT_EQ(while_loops_callees.size(), 6);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
NestedWhileLoopRemainsFlattenedOddTripCount) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
EXPECT_EQ(while_loops_callees.size(), 8);
}
TEST_F(GpuLoopDoubleBufferTransformerTest,
NestedWhileLoopRemainsFlattenedWhenFullyUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_while_loop_remains_flattened
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<const HloComputation*> while_loops_callees;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile,
[&while_loops_callees](HloInstruction* instr) {
EXPECT_TRUE(
while_loops_callees.insert(instr->while_condition()).second);
EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second);
});
hlo_query::ForEachInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile,
[](HloInstruction* instr) {
TF_ASSERT_OK_AND_ASSIGN(
WhileLoopBackendConfig config,
instr->backend_config<WhileLoopBackendConfig>());
int64_t exact_trip_count = config.known_trip_count().n();
EXPECT_EQ(exact_trip_count, 1);
});
EXPECT_EQ(while_loops_callees.size(), 22);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_are_unrolled
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}}
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer;
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
int64_t num_whiles = 0;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) {
EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>()
->known_trip_count()
.n(),
5);
++num_whiles;
});
EXPECT_EQ(num_whiles, 4);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreFullyUnrolled) {
const char* const kModuleString = R"(
HloModule loop_unrolling_nested_are_unrolled
condition_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body_nested {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
one = s32[] constant(1)
cond_plus_1 = s32[] add(cond, one)
ROOT output = (s32[]) tuple(cond_plus_1)
}
condition {
input_tuple = (s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
body {
input_tuple = (s32[]) parameter(0)
ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}}
}
ENTRY main {
param_0 = (s32[]) parameter(0)
ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
DoubleBufferLoopUnrolling double_buffer(
DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll);
EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true));
int64_t num_whiles = 0;
hlo_query::ForEachInstructionWithOpcode(
*module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) {
EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>()
->known_trip_count()
.n(),
1);
++num_whiles;
});
EXPECT_EQ(num_whiles, 12);
}
TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermute) {
const char* kModuleString = R"(
HloModule loop_unrolling_no_deps
condition {
input_tuple = (f32[], s32[]) parameter(0)
cond = s32[] get-tuple-element(input_tuple), index=1
trip_count = s32[] constant(10)
ROOT done = pred[] compare(cond, trip_count), direction=LT
}
ar_add {
Arg_1 = f32[] parameter(1)
Arg_0 = f32[] parameter(0)
ROOT add_ar = f32[] add(Arg_1, Arg_0)
}
body {
input_tuple = (f32[], s32[]) parameter(0)
param_0 = f32[] get-tuple-element(input_tuple), index=0
cond = s32[] get-tuple-element(input_tuple), index=1
collective-permute = f32[] collective-permu | 2,045 |
#ifndef XLA_SERVICE_GPU_GPU_ALL_GATHER_OPTIMIZER_H_
#define XLA_SERVICE_GPU_GPU_ALL_GATHER_OPTIMIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
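// Rewrites a commutative binary op whose operands are two compatible
// all-gathers of identically shaped inputs into a single all-gather of the
// binary op's result, removing one collective from that path.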
class AllGatherOptimizer : public HloModulePass {
public:
AllGatherOptimizer() = default;
absl::string_view name() const override { return "all-gather-optimizer"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include <cstdint>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
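// Looks for the pattern
//   op(all-gather(x), all-gather(y))
// where `op` is binary and commutative, and rewrites it into
//   all-gather(op(x, y))
// provided both all-gathers have a single user, identically shaped operands,
// and matching replica groups, layout constraints, and global-device-id
// settings.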
absl::StatusOr<bool> AllGatherOptimizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (!HloOpcodeIsBinaryCommutative(instruction->opcode())) {
continue;
}
HloInstruction* left_op = instruction->mutable_operand(0);
HloInstruction* right_op = instruction->mutable_operand(1);
if (right_op->opcode() != HloOpcode::kAllGather ||
left_op->opcode() != HloOpcode::kAllGather) {
VLOG(2) << "Binary op's operands are not all-gather deduced types.";
continue;
}
auto* left_all_gather = Cast<HloAllGatherInstruction>(left_op);
auto* right_all_gather = Cast<HloAllGatherInstruction>(right_op);
if (right_all_gather->constrain_layout() !=
left_all_gather->constrain_layout() ||
right_all_gather->use_global_device_ids() !=
left_all_gather->use_global_device_ids() ||
!ReplicaGroupsEqual(right_all_gather->replica_groups(),
left_all_gather->replica_groups())) {
VLOG(2) << "The right and left all-gather ops are not compatible "
"to merge. ";
continue;
}
if (!ShapeUtil::Equal(left_all_gather->operand(0)->shape(),
right_all_gather->operand(0)->shape())) {
VLOG(2) << "all-gather operands have different shapes";
continue;
}
if (right_all_gather->user_count() != 1 ||
left_all_gather->user_count() != 1) {
VLOG(2) << "all-gather user_count > 1 ";
continue;
}
auto index_in_full_shape =
computation->AddInstruction(HloInstruction::CreateBinary(
right_all_gather->operand(0)->shape(), instruction->opcode(),
left_all_gather->mutable_operand(0),
right_all_gather->mutable_operand(0)));
int64_t all_gather_dimension =
Cast<HloAllGatherInstruction>(right_all_gather)
->all_gather_dimension();
      auto combined = HloInstruction::CreateAllGather(
          left_all_gather->shape(), {index_in_full_shape},
          all_gather_dimension, left_all_gather->device_list(),
          /*constrain_layout=*/false, left_all_gather->channel_id(),
          Cast<HloAllGatherInstruction>(left_all_gather)
              ->use_global_device_ids());
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
instruction, std::move(combined)));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class GpuAllGatherOptimizerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, int64_t num_replicas,
int64_t num_partitions, bool expect_change) {
    HloModuleConfig config =
        GetModuleConfigForTest(num_replicas, num_partitions);
config.set_use_spmd_partitioning(num_partitions > 1);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
auto changed = AllGatherOptimizer().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
template <HloOpcode oc>
size_t CollectiveCount(std::unique_ptr<HloModule> &module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<oc>);
}
};
TEST_F(GpuAllGatherOptimizerTest, BranchesOptimized) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 2);
}
TEST_F(GpuAllGatherOptimizerTest, DisabledSPMDPartitioningJAXBug) {
absl::string_view hlo_string = R"(
HloModule pjit_f, entry_computation_layout={(f32[4,8]{1,0}, f32[4,8]{1,0})->f32[8,8]{1,0}}
ENTRY %main.6_spmd (param: f32[4,8], param.1: f32[4,8]) -> f32[8,8] {
%param = f32[4,8]{1,0} parameter(0), sharding={devices=[2,1]<=[2]}
%all-gather = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param), channel_id=1, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
%param.1 = f32[4,8]{1,0} parameter(1), sharding={devices=[2,1]<=[2]}
%all-gather.1 = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param.1), channel_id=2, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
ROOT %add.0 = f32[8,8]{1,0} add(f32[8,8]{1,0} %all-gather, f32[8,8]{1,0} %all-gather.1), metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/1,
                                  /*num_partitions=*/2,
                                  /*expect_change=*/true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1);
}
TEST_F(GpuAllGatherOptimizerTest, MoreThanSingleUserForAllGather) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3)
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/false));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3);
}
TEST_F(GpuAllGatherOptimizerTest, AllGatherWithOpInBetweenOnRightBranch) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
add.1 = bf16[8,64,1024]{2,1,0} add(reduce-scatter.1, reduce-scatter.2)
all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(add.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/true));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3);
}
TEST_F(GpuAllGatherOptimizerTest, AllGatherOneSided) {
absl::string_view hlo_string = R"(
HloModule ReduceScatter
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
ENTRY main {
param.1 = bf16[8,128,1024]{2,1,0} parameter(0)
param.2 = bf16[8,128,1024]{2,1,0} parameter(1)
param.3 = bf16[8,128,1024]{2,1,0} parameter(2)
add.1 = bf16[8,128,1024]{2,1,0} add(param.1, param.2)
reduce-scatter = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add
all-gather = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
add.2 = bf16[8,128,1024]{2,1,0} add(all-gather, add.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/false));
EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1);
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 1);
}
TEST_F(GpuAllGatherOptimizerTest, DifferentOperandShapes) {
absl::string_view hlo_string = R"(
HloModule TestModule
ENTRY main {
param.1 = bf16[8,64,128]{2,1,0} parameter(0)
param.2 = bf16[8,128,64]{2,1,0} parameter(1)
all-gather.1 = bf16[8,128,128]{2,1,0} all-gather(param.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
all-gather.2 = bf16[8,128,128]{2,1,0} all-gather(param.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true
add.1 = bf16[8,128,128]{2,1,0} add(all-gather.1, all-gather.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, /*num_replicas=*/8,
                                  /*num_partitions=*/1,
                                  /*expect_change=*/false));
}
}
}
} | 2,046 |
#ifndef XLA_SERVICE_GPU_TRITON_FUSION_ANALYSIS_H_
#define XLA_SERVICE_GPU_TRITON_FUSION_ANALYSIS_H_
#include <map>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
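// Analyzes a Triton GEMM fusion: for each scope (dot operands LHS/RHS/META
// and the OUTPUT path to the fusion root) it records how every reachable
// instruction iterates over each logical dimension. The per-dimension
// iteration specs can then be queried through IterSpec().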
class TritonFusionAnalysis {
absl::Status ExecuteForDotFusion(const HloInstruction& dot, int split_k);
public:
static absl::StatusOr<TritonFusionAnalysis> Execute(
const HloComputation& computation, int split_k = 1);
static absl::Status ExecuteForProducerConsumer(const HloInstruction& producer,
const HloInstruction& consumer,
int split_k = 1);
enum class Scope { LHS = 0, RHS = 1, META = 2, OUTPUT = 3 };
using IterationSpecByInstructionMap =
ConstHloInstructionMap<TensorIterationSpec>;
using IterationSpecByInstructionByScopeMap =
std::map<Scope, IterationSpecByInstructionMap>;
static constexpr int kMaxParameterPerDotOperand = 4;
const TensorIterationSpec::DimIterationSpec* IterSpec(Scope scope,
const HloInstruction*,
int dimension) const;
const ConstHloInstructionSet& ScopeParameters(const Scope scope) const {
return parameters_.at(scope);
}
std::optional<Scope> QueryInstructionScope(const HloInstruction& hlo) const;
std::string ToString() const;
private:
IterationSpecByInstructionByScopeMap iter_specs_;
std::map<Scope, ConstHloInstructionSet> parameters_;
};
namespace triton_fusion {
class FusionContext {
FusionContext(DotProperties properties, DotRequirements requirements)
: properties_(properties), requirements_(requirements) {}
public:
static absl::StatusOr<FusionContext> FromDotOperand(const HloInstruction& dot,
int operand_number,
int split_k = 1);
static FusionContext FromDotOutput(const HloInstruction& dot, int split_k,
DotRequirements requirements);
bool CombineDimOrdersAndReqs(const DimOrdersAndReqs& update);
absl::Status PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs);
const DotProperties& dot_properties() const { return properties_; }
const DimOrderMap& dim_orders() const { return dim_orders_; }
const DotRequirements& requirements() const { return requirements_; }
private:
const DotProperties properties_;
DotRequirements requirements_;
DimOrderMap dim_orders_;
};
}
}
}
#endif
#include "xla/service/gpu/triton_fusion_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <variant>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotRequirements;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirements;
using triton_fusion::kNoSplitRequirement;
using triton_fusion::TransformDirection;
}
namespace triton_fusion {
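// Builds the starting context for propagating dimension orders from one dot
// operand (0 = LHS, 1 = RHS) towards the fusion parameters. With split-K > 1
// the dimension just before the contracting one acts as an extra batch
// dimension, and an LHS without genuine batch dimensions gets a splittable
// non-contracting dimension.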
absl::StatusOr<FusionContext> FusionContext::FromDotOperand(
const HloInstruction& dot, const int operand_number, const int split_k) {
const int num_split_k_batch_dims = split_k > 1;
int split_k_dimension_index = kNoDimensionIndex;
TF_ASSIGN_OR_RETURN(int contracting_dimension_index,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,
NonContractingDimensionIndex(dot, operand_number));
if (split_k > 1) {
split_k_dimension_index = contracting_dimension_index - 1;
}
int splittable_dimension_index = kNoDimensionIndex;
if (operand_number == 0 &&
dot.dot_dimension_numbers().lhs_batch_dimensions_size() -
num_split_k_batch_dims ==
0) {
splittable_dimension_index = non_contracting_dimension_index;
}
FusionContext context(DotProperties{non_contracting_dimension_index,
splittable_dimension_index},
DotRequirements(kNoSplitRequirement));
context.dim_orders_[dot.operand(operand_number)] =
DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),
split_k_dimension_index);
return context;
}
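// Builds the context for propagating dimension orders from the dot towards
// the fusion root, carrying over the requirements computed for the LHS. The
// splittable dimension index shifts by one when split-K adds a leading batch
// dimension.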
FusionContext FusionContext::FromDotOutput(
const HloInstruction& dot, const int split_k,
DotRequirements requirements) {
int splittable_dimension_index = kNoDimensionIndex;
if (requirements.splittable_dimension_major_part_size > 1) {
splittable_dimension_index = (split_k > 1) ? 1 : 0;
}
FusionContext context(DotProperties{-1,
splittable_dimension_index},
std::move(requirements));
context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);
return context;
}
namespace {
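// How many new parameters fusing `hlo` as an input adds to a fusion: its
// operands become inputs while its single output stops being one, hence
// operand_count() - 1. A non-scalar constant is equivalent to a parameter
// (one input for one output) and therefore adds nothing.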
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape())) {
return 0;
}
return hlo.operand_count() - 1;
}
}
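// Merges the given dimension orders and requirements into the context.
// Returns false, leaving the context unchanged, if an instruction already
// has a physically different dimension order or if the combined dot
// requirements conflict.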
bool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {
for (const auto& [key, value] : update.dim_orders) {
auto it = dim_orders_.find(key);
if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {
return false;
}
}
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements_, update.requirements);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return false;
}
requirements_ = std::move(std::get<DotRequirements>(requirements_or_error));
dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());
return true;
}
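// Breadth-first walk from `origin` towards the fusion parameters: propagates
// each visited instruction's dimension order onto its operands and records
// the resulting tensor iteration spec. Fails if a parameter is reachable
// with two incompatible orders; traversal never continues through a dot, so
// operand scopes stay separate.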
absl::Status FusionContext::PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs) {
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> to_process;
visited.insert(&origin);
to_process.push(&origin);
while (!to_process.empty()) {
const HloInstruction* hlo = to_process.front();
to_process.pop();
if (hlo->opcode() == HloOpcode::kParameter) {
if (!parameters.insert(hlo).second) {
return FailedPrecondition(
"A parameter is read differently by different users. hlo: %s",
hlo->ToString());
}
VLOG(5) << hlo->ToString();
}
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,
properties_);
if (!std::holds_alternative<DimOrdersAndReqs>(result)) {
      return FailedPrecondition(
          "Cannot propagate dim orders and requirements.");
}
if (!CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result))) {
return FailedPrecondition("Can not combine dim orders and requirements.");
}
iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();
for (const HloInstruction* operand : hlo->operands()) {
if (!visited.insert(operand).second) {
continue;
}
if (operand->opcode() == HloOpcode::kDot) {
continue;
}
to_process.push(operand);
}
}
return absl::OkStatus();
}
}
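// Runs the analysis on a fusion computation: locates the first dot
// instruction and propagates dimension orders from it through the whole
// fusion.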
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloComputation& computation, const int split_k) {
VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());
TritonFusionAnalysis analysis;
const HloInstruction* dot =
hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);
TF_RET_CHECK(dot != nullptr);
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));
return analysis;
}
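// Checks whether `producer` could be fused into `consumer`: both are
// extracted into a fresh module, merged into a single fusion there, and the
// full analysis is run on the result. Only the status is of interest; the
// analysis itself is discarded.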
absl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(
const HloInstruction& producer, const HloInstruction& consumer,
int split_k) {
std::unique_ptr<HloModule> new_module =
ExtractProducerConsumerIntoNewModule(producer, consumer);
auto* new_producer =
new_module->entry_computation()->GetInstructionWithName(producer.name());
auto* new_consumer =
new_module->entry_computation()->GetInstructionWithName(consumer.name());
std::unique_ptr<HloInstruction> fusion_instruction_holder;
HloInstruction* fusion_instruction;
if (new_consumer->opcode() == HloOpcode::kFusion) {
fusion_instruction = new_consumer;
} else {
fusion_instruction_holder = HloInstruction::CreateFusion(
new_consumer->shape(), new_producer->fusion_kind(), new_consumer);
fusion_instruction = fusion_instruction_holder.get();
}
if (new_producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(new_producer);
} else {
fusion_instruction->FuseInstruction(new_producer);
}
auto* fused_computation =
fusion_instruction->fused_instructions_computation();
return Execute(*fused_computation, split_k).status();
}
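// Core of the analysis. First propagates dimension orders from each present
// dot operand (LHS, RHS and, if the dot has a third operand, META) down to
// the parameters. Then walks the single-user chain from the dot to the
// fusion root, propagating orders in the output direction, and finally maps
// everything reachable from that root into the OUTPUT scope.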
absl::Status TritonFusionAnalysis::ExecuteForDotFusion(
const HloInstruction& dot, const int split_k) {
DotRequirements lhs_requirements(kNoSplitRequirement);
for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {
const int operand_number = static_cast<int>(scope);
if (dot.operand_count() < operand_number + 1) {
continue;
}
TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(
dot, operand_number, split_k));
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));
if (scope == Scope::LHS) {
lhs_requirements = context.requirements();
}
}
auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);
  const HloInstruction* output = &dot;
while (!output->IsRoot()) {
TF_RET_CHECK(output->user_count() == 1);
const HloInstruction* input = output;
if (IsWorkspaceAllocationRoot(*output->users()[0])) {
break;
}
output = output->users()[0];
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*output, context.dim_orders().at(input),
TransformDirection::kInputToOutput, context.dot_properties());
TF_RET_CHECK(std::holds_alternative<DimOrdersAndReqs>(result));
TF_RET_CHECK(
context.CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result)));
}
TF_RET_CHECK(
iter_specs_[Scope::OUTPUT]
.insert(
{output, context.dim_orders().at(output).ToTensorIterationSpec()})
.second);
parameters_[Scope::OUTPUT] = {};
if (output != &dot) {
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));
}
return absl::OkStatus();
}
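// Returns the scope an instruction was assigned during the analysis, or
// std::nullopt (with a warning) if it was not reached. Note that META is not
// considered here.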
std::optional<TritonFusionAnalysis::Scope>
TritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {
for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {
if (iter_specs_.at(scope).count(&hlo) > 0) {
return scope;
}
}
LOG(WARNING) << "No scope for hlo: " << hlo.ToString();
return std::nullopt;
}
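// Returns the iteration spec of `hlo` along `dimension` within `scope`, or
// nullptr if the instruction is not part of that scope.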
const TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(
const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,
const int dimension) const {
auto hlo_spec = iter_specs_.at(scope).find(hlo);
if (hlo_spec != iter_specs_.at(scope).cend()) {
return hlo_spec->second.Find(dimension);
}
return nullptr;
}
namespace {
std::string IterationSpecByInstructionMapToString(
const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {
return absl::StrCat("IterSpec{",
absl::StrJoin(m, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(s, kv.first->name(), ": ",
kv.second.ToString());
}),
"}");
}
std::string ScopeToString(TritonFusionAnalysis::Scope s) {
switch (s) {
case TritonFusionAnalysis::Scope::LHS:
return "LHS";
case TritonFusionAnalysis::Scope::RHS:
return "RHS";
case TritonFusionAnalysis::Scope::META:
return "META";
case TritonFusionAnalysis::Scope::OUTPUT:
return "OUTPUT";
}
}
}
std::string TritonFusionAnalysis::ToString() const {
return absl::StrCat(
"TritonFusionAnalysis{\n",
absl::StrJoin(iter_specs_, ",\n",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, ScopeToString(kv.first), ": ",
IterationSpecByInstructionMapToString(kv.second));
}),
"\n}");
}
}
} | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using TritonDotAnalysisTest = HloTestBase;
TEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_dot {
p0 = f32[8,8] parameter(0)
ROOT dot = f32[8,8] dot(p0, p0),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[8,8] parameter(0)
ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot
})"));
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*module->entry_computation()
->root_instruction()
->called_computations()[0]));
EXPECT_TRUE(
analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());
}
TEST_F(TritonDotAnalysisTest, NopBitcasts) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[48,4]{1,0} parameter(0)
bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1)
bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18)
convert.4 = bf16[48,4]{1,0} convert(bitcast.19)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[48,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 48, 0,
48, ElementsAre(48))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3, 0,
3, ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {
const std::string hlo_text = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(115, 137, 0,
137, ElementsAre(137))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(115, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
}
TEST_F(TritonDotAnalysisTest, Merge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0)
bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,8,6,4]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 6 * 8,
0, 6 * 8,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, Split) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
%parameter_1 = f32[24000,2]{1,0} parameter(1)
%convert.15 = f16[24000,2]{1,0} convert(%parameter_1)
%parameter_0 = f16[4]{0} parameter(0)
%bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0)
ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[4]{0} parameter(0)
p1 = f32[24000,2]{1,0} parameter(1)
ROOT r = f16[24000,2]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p0);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),
ElementsAre(FieldsAre(2, 24000,
0, 24000,
ElementsAre(24000))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),
ElementsAre(FieldsAre(2, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
TEST_F(TritonDotAnalysisTest, TransposeMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = s8[48,4]{1,0} bitcast(transpose.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, CopyMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1)
copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99)
bitcast.18 = s8[48,4]{1,0} bitcast(copy.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = bf16[24,4]{1,0} bitcast(transpose.3)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[24,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton", called_computations={triton_dot}
ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8,
0, 8,
ElementsAre(8)),
FieldsAre(4 * 8, 3,
0, 3,
ElementsAre(3))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeOutput) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc = bf16[12,2,3]{2,1,0} bitcast(dot)
ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* dot_output = dot_computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(2, 12))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
p2 = f16[3,24]{1,0} parameter(2)
p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0}
p2tc = bf16[24,3]{1,0} convert(p2t)
ROOT r = bf16[24,3]{1,0} divide(p2tc, dot)
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
p2 = f16[3,24]{1,0} parameter(2)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* output_param =
dot_computation->parameter_instruction(2);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(24))));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
p1b = bf16[4,3] broadcast(p1)
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* scalar = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),
nullptr);
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),
nullptr);
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
p1b = bf16[4,3] broadcast(p1), dimensions={0}
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* vector = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),
1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
}
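// GemmFusion must not absorb a broadcast of the dot's result into the fusion;
// the broadcast has to remain outside as the entry root.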
TEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
ENTRY e {
p0 = f16[2,35] parameter(0)
p0c = bf16[2,35] convert(p0)
p1 = bf16[35,2] parameter(1)
dot = bf16[2,2] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kBroadcast);
}
TEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,
dot_computation->root_instruction(), 0),
ElementsAre(FieldsAre(1, 8 * 21,
0, 8 * 21,
ElementsAre(21, 8)),
FieldsAre(8 * 21 * 58, 30,
0, 30,
ElementsAre(30))));
}
TEST_F(TritonDotAnalysisTest,
HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
a = f32[3,3]{1,0} parameter(0)
constant = f32[1,1]{1,0} constant({ {0} })
broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1}
reshape = f32[] reshape(broadcast)
broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={}
ROOT dot = f32[3,3]{1,0} dot(a, broadcast2),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
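  // Only checks that the analysis does not crash while propagating through
  // trivially sized tensors; the result itself is discarded below.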
absl::StatusOr<TritonFusionAnalysis> analysis =
TritonFusionAnalysis::Execute(*dot_computation);
(void)analysis;
}
TEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input,
start_index0, start_index1),
dynamic_slice_sizes={64,2}
ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input,
start_index0, start_index1),
kind=kCustom,
calls=triton_gemm,
backend_config={"kind":"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(18, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 18,
0, 18,
ElementsAre(18))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(2, | 2,047 |
#ifndef XLA_SERVICE_GPU_CUDNN_VECTORIZE_CONVOLUTIONS_H_
#define XLA_SERVICE_GPU_CUDNN_VECTORIZE_CONVOLUTIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
class CudnnVectorizeConvolutions : public HloModulePass {
public:
explicit CudnnVectorizeConvolutions(
se::CudaComputeCapability compute_capability,
se::dnn::VersionInfo cudnn_version)
: compute_capability_(compute_capability),
cudnn_version_(cudnn_version) {}
absl::string_view name() const override {
return "cudnn_vectorize_convolutions";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
const se::dnn::VersionInfo cudnn_version_;
};
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_CUDNN_VECTORIZE_CONVOLUTIONS_H_
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
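// Returns the forward cudnn convolution custom-calls in `comp` whose input
// and output element types match and are s8 or u8 -- the only convolutions
// this pass knows how to vectorize.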
static std::vector<HloCustomCallInstruction*> GetRelevantConvs(
HloComputation* comp) {
std::vector<HloCustomCallInstruction*> convs;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kCustomCall ||
(instr->custom_call_target() != kCudnnConvForwardCallTarget &&
instr->custom_call_target() !=
kCudnnConvBiasActivationForwardCallTarget) ||
instr->operand_count() < 2) {
continue;
}
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type();
if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
return convs;
}
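// Builds the XlaBuilder's computation and deep-clones its entry computation
// into the module that owns `sibling_computation`.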
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) {
TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root));
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comp.proto(), config));
HloModule* dest_module = sibling_computation->parent();
HloCloneContext context(dest_module);
return dest_module->DeepCloneComputation(new_module->entry_computation(),
&context);
}
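// Reshapes `instr` so that dimension `dim`, whose size must be divisible by
// `vect_size`, is split into the two dimensions [size / vect_size, vect_size].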
static XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return Reshape(instr, new_dims);
}
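// Same as SplitAtDim, but transforms a Shape instead of an XlaOp.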
static Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) {
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
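// Transposes `instr` so that dimension `src` ends up at index `dst`,
// preserving the relative order of all other dimensions.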
static XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) {
XlaBuilder& b = *instr.builder();
int64_t rank = b.GetShape(instr)->dimensions_size();
DimensionVector idxs(rank);
absl::c_iota(idxs, 0);
if (src < dst) {
idxs.insert(idxs.begin() + dst, src);
idxs.erase(idxs.begin() + src);
} else {
idxs.erase(idxs.begin() + src);
idxs.insert(idxs.begin() + dst, src);
}
return Transpose(instr, idxs);
}
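// Grows an existing vector dimension: splits the factor
// vect_size / size(vect_dim) out of `dim` and folds it into `vect_dim`, so
// the vector dimension ends up with exactly `vect_size` elements.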
static XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_LE(size(vect_dim), vect_size);
CHECK_EQ(vect_size % size(vect_dim), 0);
int64_t split_factor = vect_size / size(vect_dim);
CHECK_EQ(size(dim) % split_factor, 0);
instr = SplitAtDim(instr, dim, split_factor);
if (vect_dim > dim) {
vect_dim++;
}
instr = MoveDim(instr, dim + 1, vect_dim);
if (vect_dim > dim) {
vect_dim--;
}
return Collapse(instr, {vect_dim, vect_dim + 1});
}
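// Inverse of RevectorizeInstr: shrinks `vect_dim` back to `orig_vect_size`
// elements and folds the split-off factor back into `dim`.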
static XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t orig_vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_GE(size(vect_dim), orig_vect_size);
CHECK_EQ(size(vect_dim) % orig_vect_size, 0);
instr = SplitAtDim(instr, vect_dim, orig_vect_size);
if (dim > vect_dim) {
dim++;
}
instr = MoveDim(instr, vect_dim, dim + 1);
if (dim > vect_dim) {
dim--;
}
return Collapse(instr, {dim, dim + 1});
}
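// Fixes up convolution dimension numbers after a vector dimension has been
// inserted right after each feature dimension: every index beyond the new
// vector dimension shifts up by one. Kernel dimensions are skipped when the
// filter has already been put into cudnn's reordered layout.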
static ConvolutionDimensionNumbers VectorizeDnums(
ConvolutionDimensionNumbers dnums, bool reordered_filter) {
int64_t input_vect_dim = dnums.input_feature_dimension();
if (dnums.input_batch_dimension() > input_vect_dim) {
dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) {
if (d > input_vect_dim) {
++d;
}
}
if (!reordered_filter) {
int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension();
if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) {
dnums.set_kernel_output_feature_dimension(
dnums.kernel_output_feature_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) {
if (d > kernel_vect_dim) {
++d;
}
}
}
int64_t output_vect_dim = dnums.output_feature_dimension();
if (dnums.output_batch_dimension() > output_vect_dim) {
dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) {
if (d > output_vect_dim) {
++d;
}
}
return dnums;
}
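// Reorders the filter (and bias, if present) of an int8x32 convolution into
// the layout cudnn expects, expressed as reshape/transpose HLO instead of a
// runtime cudnnReorderFilterAndBias call, and records this in the conv's
// backend config.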
absl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv,
XlaOp* operands) {
bool has_bias = conv->operand_count() > 2;
VLOG(1) << "Reordering filter" << (has_bias ? " and bias" : "")
<< " (replacement for cudnnReorderFilterAndBias)";
auto builder = operands->builder();
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
config.set_reordered_int8_nchw_vect(true);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1]));
TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering(
filter_shape, dnums));
XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]);
XlaOp transpose = Transpose(reshape, reorder.permutation);
operands[1] = Reshape(reorder.result_shape, transpose);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_spatial_dimensions(0, 2);
dnums.set_kernel_spatial_dimensions(1, 3);
conv->set_convolution_dimension_numbers(dnums);
if (has_bias) {
TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2]));
TF_ASSIGN_OR_RETURN(reorder,
CudnnInferTransposeForBiasReordering(bias_shape));
reshape = Reshape(reorder.transpose_shape, operands[2]);
transpose = Transpose(reshape, reorder.permutation);
operands[2] = Reshape(reorder.result_shape, transpose);
}
return absl::OkStatus();
}
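// Attempts to convert an already-vectorized convolution (e.g. int8x4) to a
// larger vector size (e.g. int8x32). Returns true if the conv was rewritten.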
static absl::StatusOr<bool> TryRevectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& kernel_shape = conv->operand(1)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
std::optional<int64_t> input_vect_dim;
std::optional<int64_t> kernel_vect_dim;
std::optional<int64_t> output_vect_dim;
std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) =
FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape,
output_shape);
if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() ||
!output_vect_dim.has_value()) {
return false;
}
int64_t input_feat_size =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t output_feat_size =
output_shape.dimensions(dnums->output_feature_dimension());
int64_t input_vect_size = input_shape.dimensions(*input_vect_dim);
int64_t output_vect_size = output_shape.dimensions(*output_vect_dim);
if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 ||
input_feat_size % (vect_size / input_vect_size) != 0 ||
output_feat_size % (vect_size / output_vect_size) != 0) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping re-vectorization of conv to vector size: "
<< vect_size << ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Re-vectorizing conv channels from "
<< input_shape.dimensions(*input_vect_dim) << " to " << vect_size
<< ": " << conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), *input_vect_dim,
vect_size),
RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(),
*kernel_vect_dim, vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(RevectorizeInstr(
Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->input_feature_dimension(), *input_vect_dim, vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim);
if (kernel_vect_size == 4 || kernel_vect_size == 32) {
new_operands[1] = filter;
}
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
DimensionVector new_output_dims(output_shape.dimensions().begin(),
output_shape.dimensions().end());
new_output_dims[dnums->output_feature_dimension()] /=
(vect_size / output_vect_size);
new_output_dims[*output_vect_dim] = vect_size;
XlaOp new_conv = CustomCallWithConvDnums(
&b, conv->custom_call_target(), new_operands,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims),
ShapeUtil::MakeShape(U8, {0})}),
      /*operand_shapes_with_layout=*/{},
      /*opaque=*/conv->raw_backend_config_string(), /*has_side_effect=*/false,
      /*output_operand_aliasing=*/{}, /*literal=*/nullptr,
      /*window=*/conv->window(),
      /*dnums=*/*dnums);
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr(
new_conv_result, dnums->output_feature_dimension(), *output_vect_dim,
output_shape.dimensions(*output_vect_dim));
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}),
conv->parent()));
auto new_conv_comp_instrs = new_conv_comp->instructions();
auto new_conv_it =
absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
if (new_conv_it != new_conv_comp_instrs.end()) {
new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it,
conv->name());
}
VLOG(1) << "Re-vectorized conv to " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
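// Attempts to vectorize an unvectorized convolution by splitting a factor of
// `vect_size` out of its input and output feature dimensions. Returns true
// if the conv was rewritten.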
static absl::StatusOr<bool> TryVectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int64_t vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
int64_t in_channels =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t out_channels =
output_shape.dimensions(dnums->output_feature_dimension());
if (in_channels % vect_size != 0 || out_channels % vect_size != 0) {
return false;
}
if (input_shape.dimensions_size() >
2 + dnums->input_spatial_dimensions_size()) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping vectorization of conv to vector size: " << vect_size
<< ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Vectorizing conv channels by " << vect_size << ": "
<< conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), vect_size),
SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(
SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->output_feature_dimension(), vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
new_operands[1] = filter;
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
Shape new_output_shape = SplitShapeAtDim(
output_shape, dnums->output_feature_dimension(), vect_size);
XlaOp new_conv = CustomCallWithConvDnums(
&b, conv->custom_call_target(), new_operands,
ShapeUtil::MakeTupleShape(
{new_output_shape, ShapeUtil::MakeShape(U8, {0})}),
      /*operand_shapes_with_layout=*/{},
      /*opaque=*/conv->raw_backend_config_string(), /*has_side_effect=*/false,
      /*output_operand_aliasing=*/{}, /*literal=*/nullptr,
      /*window=*/conv->window(),
      /*dnums=*/VectorizeDnums(*dnums, use_reordering));
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp conv_result_collapsed =
Collapse(new_conv_result, {dnums->output_feature_dimension(),
dnums->output_feature_dimension() + 1});
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}),
conv->parent()));
VLOG(1) << "Vectorized conv to: " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
}  // namespace
absl::StatusOr<bool> CudnnVectorizeConvolutions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
bool local_changed = false;
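      // On compute capability 7.5+ try int8x32 first: re-vectorize an
      // already-vectorized conv if possible, otherwise vectorize from
      // scratch. If neither applies, fall back to int8x4.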
if (compute_capability_.IsAtLeast(7, 5)) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32));
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32));
}
}
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4));
}
changed |= local_changed;
}
}
return changed;
}
}  // namespace gpu
} | #include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class CudnnVectorizeConvolutionsTest : public HloTestBase {
protected:
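  // Runs the pass at the given compute capability (with cuDNN 8.3) and then
  // inlines the call produced by the rewrite so the matchers can inspect the
  // vectorized convolution directly.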
absl::StatusOr<bool> Run(std::pair<int, int> compute_capability,
HloModule* module) {
CudnnVectorizeConvolutions pass(
se::CudaComputeCapability{compute_capability.first,
compute_capability.second},
se::dnn::VersionInfo(8, 3, 0));
TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module));
CallInliner inliner;
TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status());
return changed;
}
};
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 10, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 10, 4, 44}))
.WithConvDnums("b01f?_01i?o->b01f?"))
.WithShape(S8, {10, 20, 30, 11, 4})),
m::Op())));
EXPECT_EQ(conv->raw_backend_config_string(), "{bar: 0}");
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = f32[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,48,20,30] parameter(0)
filter = s8[48,44,2,2] parameter(1)
ROOT result = (s8[10,44,20,30], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 12, 4, 20, 30}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {12, 4, 44, 2, 2}))
.WithConvDnums("bf?01_i?o01->bf?01"))
.WithShape(S8, {10, 11, 4, 20, 30})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[16,16,16,16] parameter(0)
filter = s8[16,16,3,3] parameter(1)
ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=fb01_i01o->fb01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 4, 16, 16, 16}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 4, 16, 3, 3}))
.WithConvDnums("f?b01_i?01o->f?b01"))
.WithShape(S8, {4, 4, 16, 16, 16})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[1,20,9,9] parameter(0)
filter = s8[3,3,20,32] parameter(1)
ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter),
window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {1, 5, 4, 9, 9}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 3, 5, 4, 32}))
.WithConvDnums("bf?01_01i?o->bf?01"))
.WithShape(S8, {1, 8, 4, 9, 9})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (f32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
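// With an s8 input, vector size 32, and cuDNN >= 8.3, the pass also reorders
// the filter in HLO (see ReorderInt8NchwVect); the matchers below verify that
// reshape/transpose pattern and the reordered_int8_nchw_vect flag.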
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 5, 7, 3, 6,
4});
}))
.WithShape(S8, {128, 2, 2, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,20,30,64] parameter(3)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,128,20,30] parameter(3)
ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 4, 32, 20, 30})))
.WithShape(S8, {10, 4, 32, 20, 30})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 16, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 16, 4, 128})))
.WithShape(S8, {10, 20, 30, 32, 4})),
m::Op())));
EXPECT_FALSE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[3,5,16,192,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 4, 6, 3, 5, 7});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}))
.WithConvDnums("b01f?_oi01?->b01f?"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,16,20,30,4] parameter(0)
filter = s8[16,128,2,2,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,16,20,30,4] parameter(3)
ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 5, 6, 2, 4, 1, 3, 7});
}))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}))
.WithConvDnums("bf01_oi01->bf01"))
.WithShape(S8, {10, 4, 20, 30, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 4, 20, 30, 8, 4}))
.WithShape(S8, {10, 4, 8, 20, 30, 4}))
.WithShape(S8, {10, 32, 20, 30, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[4,10,20,30,16] parameter(0)
filter = s8[4,3,5,16,192] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[4,10,20,30,16] parameter(3)
ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=?b01f_?01io->?b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{3, 1, 2, 5, 7, 4, 6, 0});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}))
.WithConvDnums("?b01f_oi01->?b01f"))
.WithShape(S8, {32, 10, 20, 30, 6});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {8, 4, 10, 20, 30, 6}))
.WithShape(S8, {4, 10, 20, 30, 6, 8}))
.WithShape(S8, {4, 10, 20, 30, 48}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[2,2,16,128,4] parameter(1)
bias = f32[10] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,4,16] parameter(0)
filter = s8[3,5,4,192,16] parameter(1)
ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto filter_pat =
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16}))
.WithShape(S8, {3, 5, 2, 192, 2, 16}))
.WithShape(S8, {3, 5, 2, 192, 32});
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(filter_pat)
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 12, 16}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
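// Mixed case: the input arrives vectorized as s8[...,8,8] while the filter
// is already 32-wide; both operands should be normalized to 32-wide vectors
// feeding the reordered convolution, and the result unpacked back to the
// original s8[...,96,2] shape.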
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,8,8] parameter(0)
filter = s8[3,5,2,192,32] parameter(1)
ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 96, 2}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
}
}
} | 2,048 |
#ifndef XLA_SERVICE_GPU_TRITON_SUPPORT_H_
#define XLA_SERVICE_GPU_TRITON_SUPPORT_H_
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
using CodegenDecision = FusionDecision;
namespace legacy_triton {
bool IsDistributiveOverAddition(const HloInstruction& hlo);
std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization(
PrimitiveType);
std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization(
PrimitiveType);
std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization(
PrimitiveType);
bool IsTritonSupportedDataType(PrimitiveType, const se::GpuComputeCapability&);
bool IsTritonSupportedElementwiseUpToFloatNormalization(HloOpcode,
PrimitiveType);
CodegenDecision CanTritonHandleGEMM(
const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version);
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version);
CodegenDecision IsTritonSupportedDynamicSlice(
const HloDynamicSliceInstruction& instr);
}
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version);
}
}
#endif
#include "xla/service/gpu/triton_support.h"
#include <cstdint>
#include <iterator>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace xla {
namespace gpu {
namespace legacy_triton {
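// Ops listed here (multiplication, negation, and various pure data-movement
// ops such as bitcast/reshape/copy/transpose/convert/broadcast/slice) are
// treated as distributing over addition by the callers of this predicate.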
bool IsDistributiveOverAddition(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kMultiply ||
hlo.opcode() == HloOpcode::kNegate ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kReshape || hlo.opcode() == HloOpcode::kCopy ||
hlo.opcode() == HloOpcode::kTranspose ||
hlo.opcode() == HloOpcode::kConvert ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kSlice) {
return true;
}
return false;
}
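// Dot output types supported by the Triton emitter. F16/F32 are always
// allowed; F8E5M2 requires at least Ampere, F8E4M3FN at least Hopper (both
// CUDA-only), and BF16 is gated on BF16 support for ROCm devices.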
bool IsTritonSupportedDotOutputType(
const PrimitiveType t, const se::GpuComputeCapability& gpu_version) {
switch (t) {
case F16:
case F32:
return true;
case F8E5M2:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastAmpere();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case F8E4M3FN:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return cc.IsAtLeastHopper();
},
[](const se::RocmComputeCapability& cc) {
return false;
}},
gpu_version);
case BF16:
return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return true;
},
[](const se::RocmComputeCapability& cc) {
return cc.has_bf16_dtype_support();
}},
gpu_version);
default:
return false;
}
}
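// General data-type support: everything usable as a dot output, plus PRED
// and the signed integer types S8/S16/S32.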
bool IsTritonSupportedDataType(PrimitiveType type,
const se::GpuComputeCapability& gpu_version) {
if (IsTritonSupportedDotOutputType(type, gpu_version)) {
return true;
}
switch (type) {
case PRED:
case S8:
case S16:
case S32:
return true;
default:
return false;
}
}
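// The "up to float normalization" helpers below list what the legacy emitter
// can codegen, presumably assuming the float-normalization pass has already
// run. Transcendental functions are only offered for F32/BF16/F64 element
// types; PRED is limited to logic ops plus convert.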
std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
std::vector<HloOpcode> ret = {HloOpcode::kConvert};
if (element_type == PrimitiveType::PRED) {
ret.push_back(HloOpcode::kNot);
return ret;
}
ret.push_back(HloOpcode::kAbs);
ret.push_back(HloOpcode::kNegate);
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
absl::c_copy(std::vector<HloOpcode>{HloOpcode::kCos, HloOpcode::kExp,
HloOpcode::kExpm1, HloOpcode::kFloor,
HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt,
HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf},
std::back_inserter(ret));
}
return ret;
}
std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor,
HloOpcode::kCompare};
}
std::vector<HloOpcode> ret = {HloOpcode::kAdd, HloOpcode::kCompare,
HloOpcode::kMaximum, HloOpcode::kMinimum,
HloOpcode::kMultiply, HloOpcode::kSubtract};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F64) {
ret.push_back(HloOpcode::kAtan2);
ret.push_back(HloOpcode::kDivide);
ret.push_back(HloOpcode::kPower);
}
return ret;
}
std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization(
PrimitiveType element_type) {
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwiseUpToFloatNormalization(
HloOpcode opcode, PrimitiveType element_type) {
return absl::c_linear_search(
TritonSupportedUnaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedBinaryElementwiseUpToFloatNormalization(
element_type),
opcode) ||
absl::c_linear_search(
TritonSupportedTernaryElementwiseUpToFloatNormalization(
element_type),
opcode);
}
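// Checks the output and all operand element types before consulting the
// per-arity opcode lists above; constants pass through unconditionally.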
CodegenDecision CanTritonHandleElementwise(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (!IsTritonSupportedDataType(instr.shape().element_type(), gpu_version)) {
return "Unsupported output data type.";
}
for (const HloInstruction* operand : instr.operands()) {
if (!IsTritonSupportedDataType(operand->shape().element_type(),
gpu_version)) {
return "Unsupported input data type.";
}
}
if (instr.opcode() == HloOpcode::kConstant) {
return CodegenDecision{};
} else if (!IsTritonSupportedElementwiseUpToFloatNormalization(
instr.opcode(), instr.operand(0)->shape().element_type())) {
return "Unsupported elementwise operation.";
}
return CodegenDecision{};
}
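// TF32-based dot algorithms are CUDA-only; the BF16-based algorithms
// additionally work on ROCm devices with BF16 support. All other algorithms
// (including F16_F16_F32 and F32_F32_F32) are rejected.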
bool IsDotAlgorithmSupportedByTriton(
PrecisionConfig::Algorithm algorithm,
const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
switch (algorithm) {
case PrecisionConfig::ALG_DOT_TF32_TF32_F32:
if (cuda_compute_capability) {
return true;
}
return false;
case PrecisionConfig::ALG_DOT_BF16_BF16_F32:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3:
case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6:
if (cuda_compute_capability) {
return true;
}
if (rocm_compute_capability) {
return rocm_compute_capability->has_bf16_dtype_support();
}
return false;
case PrecisionConfig::ALG_DOT_F16_F16_F32:
case PrecisionConfig::ALG_DOT_F32_F32_F32:
default:
return false;
}
}
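// Gates GEMM emission on the dot's algorithm/precision configuration (an
// unset algorithm requires TensorFloat-32 to be enabled and default operand
// precisions), the input and output element types, and at most one batch
// dimension.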
CodegenDecision CanTritonHandleGEMM(
const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
CHECK(cuda_compute_capability || rocm_compute_capability);
if (dot.precision_config().algorithm() == PrecisionConfig::ALG_UNSET) {
if (!tsl::tensor_float_32_execution_enabled() ||
absl::c_any_of(dot.precision_config().operand_precision(),
[](int x) { return x != PrecisionConfig::DEFAULT; })) {
return "Having non-default operand precisions or TensorFloat-32 disabled "
"for Dot op with unset algorithm.";
}
} else {
if (!IsDotAlgorithmSupportedByTriton(dot.precision_config().algorithm(),
gpu_version)) {
return "Unsupported algorithm on the current device(s).";
}
}
if (!IsTritonSupportedDotOutputType(dot.shape().element_type(),
gpu_version)) {
return "Unsupported output data type for Dot op.";
}
if (!IsTritonSupportedDataType(dot.operand(0)->shape().element_type(),
gpu_version) ||
!IsTritonSupportedDataType(dot.operand(1)->shape().element_type(),
gpu_version)) {
return "Unsupported input data type for Dot op.";
}
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() > 1) {
return "Multiple batch dimensions.";
}
return CodegenDecision{};
}
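// Only row reductions over the last dimension of a single operand are
// accepted; the init value must be a constant (or an F32 convert of a BF16
// constant), and every instruction in the reduction computation must itself
// be Triton-codegenable.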
CodegenDecision CanTritonHandleReduce(
const HloReduceInstruction& reduce,
const se::GpuComputeCapability& gpu_version) {
if (!IsTritonSupportedDataType(reduce.shape().element_type(), gpu_version)) {
return "Unsupported output data type for Reduce op.";
}
for (const HloInstruction* operand : reduce.operands()) {
if (!IsTritonSupportedDataType(operand->shape().element_type(),
gpu_version)) {
return "Unsupported input data type for Reduce op.";
}
}
bool is_triton_supported_reduction_computation = [&]() {
return absl::c_all_of(
reduce.to_apply()->instructions(), [&](const HloInstruction* instr) {
return IsTritonSupportedInstruction(*instr, gpu_version);
});
}();
if (!is_triton_supported_reduction_computation) {
return "Unsupported reduction computation by Triton.";
}
if (reduce.dimensions().size() == 1 &&
reduce.dimensions().front() == reduce.operand(0)->shape().rank() - 1 &&
reduce.operand_count() == 2) {
const HloInstruction* operand = reduce.operand(1);
if (operand->opcode() == HloOpcode::kConvert) {
if (operand->operand(0)->opcode() == HloOpcode::kConstant &&
operand->operand(0)->shape().element_type() == BF16 &&
operand->shape().element_type() == F32) {
return CodegenDecision{};
}
} else if (operand->opcode() == HloOpcode::kConstant) {
return CodegenDecision{};
}
return "Reduction init value should be a constant or a convert of a "
"constant.";
}
return "Reduction is not a row-reduction of a single operand.";
}
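// True when every dimension of either dot operand is a batch or contracting
// dimension, i.e. there is no non-contracting dimension left to tile.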
bool NoNonContractingDimension(const HloDotInstruction& dot) {
const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers();
if (dim_numbers.lhs_batch_dimensions().size() +
dim_numbers.lhs_contracting_dimensions().size() ==
dot.operand(0)->shape().rank() ||
dim_numbers.rhs_batch_dimensions().size() +
dim_numbers.rhs_contracting_dimensions().size() ==
dot.operand(1)->shape().rank()) {
return true;
}
return false;
}
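// Dynamic slices are restricted to S8/S16/S32 indices and may only slice
// along the physically major-most dimension; every other dimension must be
// taken in full.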
CodegenDecision IsTritonSupportedDynamicSlice(
const HloDynamicSliceInstruction& instr) {
for (const HloInstruction* index_operand : instr.index_operands()) {
switch (index_operand->shape().element_type()) {
case S8:
case S16:
case S32:
break;
default:
return CodegenDecision(
"Dynamic slice is only supported with S8, S16, or S32 indices.");
}
}
const HloInstruction* input = instr.operand(0);
Layout in_layout = input->shape().layout();
int64_t majormost_dim_id =
in_layout.minor_to_major(in_layout.minor_to_major_size() - 1);
for (int i = 0; i < input->shape().dimensions_size(); ++i) {
if (i == majormost_dim_id) {
continue;
} else if (input->shape().dimensions(i) != instr.slice_sizes(i)) {
return CodegenDecision(
"Unsupported dynamic slice on non-major-most dimension.");
}
}
return CodegenDecision{};
}
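// Legacy entry point: dispatches to the per-opcode checks above and accepts
// a set of data-movement ops (bitcast, transpose, slice, reshape, pad,
// concatenate, parameter, broadcast) plus root tuples unconditionally.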
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
if (instr.IsElementwise()) {
return CanTritonHandleElementwise(instr, gpu_version);
}
switch (instr.opcode()) {
case HloOpcode::kDot: {
auto* dot = Cast<HloDotInstruction>(&instr);
if (NoNonContractingDimension(*dot)) {
return "No non-contracting dimensions.";
}
return CanTritonHandleGEMM(*dot, gpu_version);
}
case HloOpcode::kReduce: {
return CanTritonHandleReduce(*Cast<HloReduceInstruction>(&instr),
gpu_version);
}
case HloOpcode::kTuple: {
if (instr.IsRoot()) {
return CodegenDecision{};
}
return "Only supports root tuples.";
}
case HloOpcode::kDynamicSlice: {
return IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&instr));
}
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kReshape:
case HloOpcode::kPad:
case HloOpcode::kConcatenate:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
return CodegenDecision{};
default:
break;
}
return "Unsupported opcode.";
}
}
namespace {
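// Opcode sets for the non-legacy check below. Note they differ slightly from
// the legacy lists: transcendental unary functions and the binary math ops
// (atan2, divide, power) are limited to F32/F64, while BF16/F16 only gain
// floor and ceil.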
absl::flat_hash_set<HloOpcode> TritonSupportedUnaryElementwiseOps(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kConvert, HloOpcode::kNot};
}
absl::flat_hash_set<HloOpcode> ret = {HloOpcode::kConvert, HloOpcode::kAbs,
HloOpcode::kNegate};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
absl::flat_hash_set<HloOpcode> additional_opcodes{
HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kExpm1,
HloOpcode::kFloor, HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt, HloOpcode::kSin,
HloOpcode::kSqrt, HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
if (element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F16) {
absl::flat_hash_set<HloOpcode> additional_opcodes{HloOpcode::kFloor,
HloOpcode::kCeil};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
return ret;
}
absl::flat_hash_set<HloOpcode> TritonSupportedBinaryElementwiseOps(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor,
HloOpcode::kCompare};
}
absl::flat_hash_set<HloOpcode> ret = {
HloOpcode::kAdd, HloOpcode::kCompare, HloOpcode::kMaximum,
HloOpcode::kMinimum, HloOpcode::kMultiply, HloOpcode::kSubtract};
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
absl::flat_hash_set<HloOpcode> additional_opcodes{
HloOpcode::kAtan2, HloOpcode::kDivide, HloOpcode::kPower};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
return ret;
}
absl::flat_hash_set<HloOpcode> TritonSupportedTernaryElementwiseOps(
PrimitiveType element_type) {
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwise(HloOpcode opcode,
PrimitiveType element_type) {
return TritonSupportedUnaryElementwiseOps(element_type).contains(opcode) ||
TritonSupportedBinaryElementwiseOps(element_type).contains(opcode) ||
TritonSupportedTernaryElementwiseOps(element_type).contains(opcode);
}
}
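// Non-legacy entry point: input/output element types are checked first, then
// elementwise ops are dispatched, and only a much smaller set of structural
// opcodes (reduce, transpose, slice, parameter, broadcast) is accepted.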
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
bool output_type_is_supported = legacy_triton::IsTritonSupportedDataType(
instr.shape().element_type(), gpu_version);
if (!output_type_is_supported) {
return "Unsupported output data type.";
}
bool input_types_are_supported =
absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) {
return legacy_triton::IsTritonSupportedDataType(
operand->shape().element_type(), gpu_version);
});
if (!input_types_are_supported) {
return "Unsupported input data type.";
}
if (instr.IsElementwise()) {
if (!IsTritonSupportedElementwise(instr.opcode(),
instr.shape().element_type())) {
return "Unsupported elementwise operation.";
}
return CodegenDecision{};
}
switch (instr.opcode()) {
case HloOpcode::kReduce: {
return legacy_triton::CanTritonHandleReduce(
*Cast<HloReduceInstruction>(&instr), gpu_version);
}
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
return CodegenDecision{};
default:
VLOG(1) << "Unsupported instruction: " << instr.ToString();
break;
}
return "Unsupported opcode.";
}
}
} | #include "xla/service/gpu/triton_support.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/ir_emitter_triton.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/triton_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::Not;
using ::testing::status::IsOk;
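// Enumerates every PrimitiveType from the proto descriptor except the
// non-array types (invalid, tuple, opaque, token).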
auto AllXlaDataTypes() {
std::vector<xla::PrimitiveType> xla_data_types;
std::vector<xla::PrimitiveType> to_filter_out = {PRIMITIVE_TYPE_INVALID,
TUPLE, OPAQUE_TYPE, TOKEN};
const tsl::protobuf::EnumDescriptor* xla_type_descriptor =
tsl::protobuf::GetEnumDescriptor<xla::PrimitiveType>();
for (int enum_ix = 0; enum_ix < xla_type_descriptor->value_count();
++enum_ix) {
xla::PrimitiveType xla_type = static_cast<xla::PrimitiveType>(
xla_type_descriptor->value(enum_ix)->number());
if (!absl::c_linear_search(to_filter_out, xla_type)) {
xla_data_types.push_back(xla_type);
}
}
return ::testing::ValuesIn(xla_data_types);
}
auto AllDevicesToTest() {
using cc = se::GpuComputeCapability;
#ifdef TENSORFLOW_USE_ROCM
se::RocmComputeCapability example_rocm_compute_capability =
TestGpuDeviceInfo::AMDMI210DeviceInfo().rocm_compute_capability();
return ::testing::Values(cc(example_rocm_compute_capability));
#else
return ::testing::Values(cc(se::CudaComputeCapability::Ampere()),
cc(se::CudaComputeCapability::Hopper()));
#endif
}
auto AllTestCombinationsForOpcodes(std::vector<HloOpcode>&& opcodes) {
return ::testing::Combine(AllXlaDataTypes(), ::testing::ValuesIn(opcodes),
AllDevicesToTest());
}
class TritonSupportTest : public TritonSupportTestBase {
public:
void RunSupportTest(TestedInstruction ti,
std::vector<int64_t> output_tile_sizes,
se::GpuComputeCapability cc,
bool skip_failure_branch_to_avoid_crash = false) {
BlockLevelParameters block_level_parameters =
FromOutputTileSizes(std::move(output_tile_sizes));
const se::DeviceDescription dev_info =
std::holds_alternative<se::CudaComputeCapability>(cc)
? TestGpuDeviceInfo::RTXA6000DeviceInfo(cc)
: TestGpuDeviceInfo::AMDMI210DeviceInfo();
if (IsTritonSupportedInstruction(ti.Instruction(), cc)) {
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info,
block_level_parameters, &llvm_module_, mlir_context_),
IsOk());
} else {
if (!skip_failure_branch_to_avoid_crash) {
EXPECT_THAT(
TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info,
block_level_parameters, &llvm_module_, mlir_context_),
Not(IsOk()));
}
}
}
};
class TritonSupportTestWithParam
: public TritonSupportTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>> {};
using BitcastOrReshapeTest = TritonSupportTestWithParam;
TEST_P(BitcastOrReshapeTest, IsTritonSupportedBitcastOrReshape) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[1,16,4]{2,1,0} parameter(0)
ROOT bitcast_or_reshape = $0[64]{0} $1(parameter_0)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {16}, cc);
}
INSTANTIATE_TEST_SUITE_P(BitcastOrReshapeTestSuite, BitcastOrReshapeTest,
AllTestCombinationsForOpcodes({HloOpcode::kBitcast,
HloOpcode::kReshape}),
TritonSupportTestTypeOpcodeAndDeviceToString);
using UnaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(UnaryElementwiseTest, IsTritonSupportedUnaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68]{1,0} parameter(0)
unary = $0[33,68]{1,0} $1(parameter_0)
ROOT convert = f32[33,68]{1,0} convert(unary)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
UnaryElementwiseTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kConvert, HloOpcode::kAbs,
HloOpcode::kNegate),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
UnaryPREDTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(PRED),
::testing::Values(HloOpcode::kConvert, HloOpcode::kNot),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
UnaryMathTestSuite, UnaryElementwiseTest,
::testing::Combine(::testing::Values(F16, F32, BF16),
::testing::Values(HloOpcode::kCeil, HloOpcode::kCos,
HloOpcode::kExp, HloOpcode::kExpm1,
HloOpcode::kFloor, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt,
HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using BinaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63]{1,0} parameter(0)
parameter_1 = $0[11,63]{1,0} parameter(1)
ROOT binary = $0[11,63]{1,0} $1(parameter_0, parameter_1)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
bool skip_failure_branch_to_avoid_crash = false;
if (primitive_util::BitWidth(data_type) == 16 &&
opcode == HloOpcode::kDivide) {
skip_failure_branch_to_avoid_crash = true;
}
RunSupportTest(std::move(ti), {1, 32}, cc,
skip_failure_branch_to_avoid_crash);
}
INSTANTIATE_TEST_SUITE_P(
BinaryElementwiseTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kAdd, HloOpcode::kMultiply,
HloOpcode::kMaximum,
HloOpcode::kMinimum,
HloOpcode::kSubtract),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(BinaryPREDTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(PRED),
::testing::Values(HloOpcode::kAnd,
HloOpcode::kOr,
HloOpcode::kXor),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
INSTANTIATE_TEST_SUITE_P(
BinaryMathTestSuite, BinaryElementwiseTest,
::testing::Combine(::testing::Values(F16, F32, BF16),
::testing::Values(HloOpcode::kAtan2, HloOpcode::kDivide,
HloOpcode::kPower),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using CompareTest = TritonSupportTestWithParam;
TEST_P(CompareTest, IsTritonSupportedCompare) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63]{1,0} parameter(0)
parameter_1 = $0[11,63]{1,0} parameter(1)
compare = pred[11,63]{1,0} $1(parameter_0, parameter_1), direction=GE
ROOT convert = f32[11,63]{1,0} convert(compare)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
CompareTestSuite, CompareTest,
::testing::Combine(::testing::Values(PRED, S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kCompare),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using TernaryElementwiseTest = TritonSupportTestWithParam;
TEST_P(TernaryElementwiseTest, IsTritonSupportedTernaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[13,63]{1,0} parameter(0)
parameter_1 = $0[13,63]{1,0} parameter(1)
parameter_2 = pred[13,63]{1,0} parameter(2)
ternary = $0[13,63]{1,0} $1(parameter_2, parameter_0, parameter_1)
ROOT convert = f32[13,63]{1,0} convert(ternary)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
INSTANTIATE_TEST_SUITE_P(
TernaryElementwiseTestSuite, TernaryElementwiseTest,
::testing::Combine(::testing::Values(PRED, S8, S16, S32, F16, F32, BF16),
::testing::Values(HloOpcode::kSelect),
AllDevicesToTest()),
TritonSupportTestTypeOpcodeAndDeviceToString);
using ReduceTest = TritonSupportTestWithParam;
TEST_P(ReduceTest, IsTritonSupportedReduction) {
GTEST_SKIP() << "TODO(b/348565795): this test is currently broken.";
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125]{0} reduce(parameter_0, constant_0),
dimensions={1}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(
ReduceTest,
UnsupportedReduceWithMoreThanOneReduceDimensionsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[2,125,127]{2,1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[2]{0} reduce(parameter_0, constant_0),
dimensions={1,2}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithNonLastReduceDimensionFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[127]{0} reduce(parameter_0, constant_0), dimensions={0}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithMoreThanOneOperandsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
Arg_2 = $0[] parameter(2)
Arg_3 = $0[] parameter(3)
add_0 = $0[] add(Arg_0, Arg_2)
add_1 = $0[] add(Arg_1, Arg_3)
ROOT pair = ($0[], $0[]) tuple(add_0, add_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
tuple = ($0[125]{0}, $0[125]{0}) reduce(
parameter_0, parameter_0, constant_0, constant_0),
dimensions={1}, to_apply=add
ROOT reduce = $0[125]{0} get-tuple-element(tuple), index=0
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithNonConstReduceValueFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
init = $0[] parameter(1)
ROOT reduce = $0[125]{0} reduce(parameter_0, init), dimensions={1}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest, UnsupportedReductionComputationFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
custom_call {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT custom_call = $0[] custom-call(Arg_0, Arg_1), custom_call_target="foo"
}
ENTRY triton_computation {
parameter_0 = $0[125,127]{1,0} parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125]{0} reduce(parameter_0, constant_0),
dimensions={1}, to_apply=custom_call
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
INSTANTIATE_TEST_SUITE_P(ReduceTestSuite, ReduceTest,
AllTestCombinationsForOpcodes({HloOpcode::kReduce}),
TritonSupportTestTypeOpcodeAndDeviceToString);
}
}
} | 2,049 |
#ifndef XLA_SERVICE_GPU_IR_EMITTER_TRITON_H_
#define XLA_SERVICE_GPU_IR_EMITTER_TRITON_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Module.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/PassManager.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/launch_dim.h"
#include "triton/Dialect/Triton/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
namespace xla {
namespace gpu {
namespace mt = ::mlir::triton;
struct TritonWrapperResult {
int64_t shmem_bytes = 0;
std::optional<se::ClusterDim> cluster_dim;
};
absl::Status EmitGeneric(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
absl::StatusOr<LaunchDimensions> GetMatMulLaunchDimensions(
const TritonFusionAnalysis& analysis, const HloFusionAdaptor& fusion,
const TritonGemmConfig& config);
absl::Status EmitMatMul(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
absl::Status EmitSoftMax(mlir::OpBuilder b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloFusionInstruction* fusion,
mlir::triton::FuncOp fn,
const BlockLevelParameters& block_level_parameters);
using TritonIrEmitter = std::function<absl::Status(
mlir::OpBuilder, absl::string_view, const se::DeviceDescription&,
const HloFusionInstruction*, mlir::triton::FuncOp,
const BlockLevelParameters&)>;
void LoadMlirDialectsForTriton(mlir::MLIRContext& mlir_context);
absl::StatusOr<TritonWrapperResult> TritonWrapper(
absl::string_view fn_name, const HloFusionInstruction* fusion,
const se::GpuComputeCapability& cc,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
llvm::Module* llvm_module, mlir::MLIRContext& mlir_context);
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> CreateTritonModule(
absl::string_view fn_name, const HloFusionInstruction* fusion,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
mlir::MLIRContext& mlir_context);
absl::StatusOr<TritonWrapperResult> CompileTritonToLLVM(
const HloModuleConfig& hlo_config, absl::string_view hlo_module_name,
const se::GpuComputeCapability& cc,
const se::DeviceDescription& device_info,
const BlockLevelParameters& block_level_parameters,
mlir::ModuleOp triton_module, llvm::Module* llvm_module,
mlir::MLIRContext& mlir_context);
absl::Status CreateTritonPipeline(
mlir::OpPassManager& pm, const se::GpuComputeCapability& cc,
const BlockLevelParameters& block_level_parameters,
mt::nvidia_gpu::ClusterInfo& out_cluster_info);
std::string GetLibdevicePath(const HloModuleConfig& hlo_config,
const se::DeviceDescription& device_info);
namespace ir_emitter_triton_internal {
struct MakeTensorPtrOpAndBoundaryChecks {
mt::MakeTensorPtrOp op;
llvm::SmallVector<int32_t> boundary_checks;
};
MakeTensorPtrOpAndBoundaryChecks CreateMakeTensorPtrOp(
mlir::ImplicitLocOpBuilder& b, mlir::Value pid,
const TiledHloInstruction& tiled_hlo, mlir::Value argument_block);
}
}
}
#endif
#include "xla/service/gpu/ir_emitter_triton.h"
#include <array>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <system_error>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/IndexToLLVM/IndexToLLVM.h"
#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "mlir/Transforms/Passes.h"
#include "xla/autotuning.pb.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/map_mhlo_to_scalar_op.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/passes.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/translate/hlo_to_mhlo/hlo_function_importer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tensor_float_32_utils.h"
#include "triton/Conversion/TritonGPUToLLVM/Passes.h"
#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h"
#include "triton/Dialect/Triton/IR/Dialect.h"
#include "triton/Dialect/Triton/IR/Types.h"
#include "triton/Dialect/TritonGPU/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
namespace xla {
namespace gpu {
namespace ma = ::mlir::arith;
namespace mm = ::mlir::math;
namespace ml = ::mlir::LLVM;
namespace mn = ::mlir::NVVM;
namespace mt = ::mlir::triton;
using ::llvm::SmallVector;
using mlir::ArrayRef;
using mlir::ImplicitLocOpBuilder;
using ::mlir::ShapedType;
using ::mlir::Type;
using ::mlir::Value;
using mlir::ValueRange;
namespace {
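// Maps XLA primitive types to their MLIR/Triton equivalents. Unsupported
// types surface as an UnimplementedError rather than CHECK-failing.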
absl::StatusOr<Type> TritonType(mlir::OpBuilder b, PrimitiveType t) {
switch (t) {
case F64:
return b.getF64Type();
case F32:
return b.getF32Type();
case F16:
return b.getF16Type();
case BF16:
return b.getBF16Type();
case S64:
return b.getI64Type();
case S32:
return b.getI32Type();
case S16:
return b.getI16Type();
case PRED:
return b.getI1Type();
case S8:
return b.getI8Type();
case F8E5M2:
return b.getFloat8E5M2Type();
case F8E4M3FN:
return b.getFloat8E4M3FNUZType();
default:
return absl::UnimplementedError(
absl::StrCat("This type is not supported yet: ",
primitive_util::LowercasePrimitiveTypeName(t)));
}
}
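// i1 values are stored as i8 in memory; all other types are stored as-is.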
Type StorageType(mlir::OpBuilder b, Type t) {
if (t.isInteger(1)) {
return b.getI8Type();
}
return t;
}
template <typename T>
T ScalarConstantValue(const HloInstruction& instr, PrimitiveType dst_type) {
CHECK(hlo_query::IsScalarConstant(&instr));
absl::StatusOr<Literal> converted = instr.literal().Convert(dst_type);
TF_CHECK_OK(converted.status());
return converted.value().GetFirstElement<T>();
}
template <typename T>
ma::ConstantOp CreateConst(ImplicitLocOpBuilder b, Type type, T value) {
if (mlir::isa<mlir::IntegerType>(type)) {
return b.create<ma::ConstantOp>(b.getIntegerAttr(type, value));
}
if (mlir::isa<mlir::FloatType>(type)) {
return b.create<ma::ConstantOp>(
b.getFloatAttr(type, static_cast<double>(value)));
}
LOG(FATAL) << "Constant type not supported: " << llvm_ir::DumpToString(type);
}
template <typename T>
ma::ConstantOp CreateConst(ImplicitLocOpBuilder& b, Type type, T value,
ArrayRef<int64_t> shape) {
auto tensor_type = mlir::RankedTensorType::get(shape, type);
if (auto int_type = mlir::dyn_cast<mlir::IntegerType>(type)) {
return b.create<ma::ConstantOp>(mlir::DenseElementsAttr::get(
tensor_type, mlir::APInt(int_type.getIntOrFloatBitWidth(), value)));
}
if (auto float_type = mlir::dyn_cast<mlir::FloatType>(type)) {
return b.create<ma::ConstantOp>(mlir::DenseElementsAttr::get(
tensor_type, b.getFloatAttr(type, static_cast<double>(value))));
}
LOG(FATAL) << "Constant type not supported: " << llvm_ir::DumpToString(type);
}
Value ZerosLike(ImplicitLocOpBuilder& b, Value x) {
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(x.getType())) {
Type src_ty = src_shaped_ty.getElementType();
return CreateConst(b, src_ty, 0, src_shaped_ty.getShape());
}
return CreateConst(b, x.getType(), 0);
}
Value OnesLike(ImplicitLocOpBuilder& b, Value x) {
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(x.getType())) {
Type src_ty = src_shaped_ty.getElementType();
return CreateConst(b, src_ty, 1, src_shaped_ty.getShape());
}
return CreateConst(b, x.getType(), 1);
}
bool IsFp8Type(Type t) {
return t.isFloat8E5M2() || t.isFloat8E4M3FN() || t.isFloat8E5M2FNUZ() ||
t.isFloat8E4M3FNUZ() || t.isFloat8E4M3B11FNUZ();
}
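// Emits a cast between (possibly shaped) element types. BF16 sources
// round-trip through F32, FP8 conversions go through Triton's FpToFpOp, and
// the remaining cases use the usual arith extension/truncation and
// int <-> float conversions, with i1 handled specially.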
Value Cast(ImplicitLocOpBuilder& b, Value value, Type dst_element_ty) {
Type src_ty = value.getType();
Type src_element_ty = src_ty;
Type fp32_ty = b.getF32Type();
Type dst_ty = dst_element_ty;
if (auto src_shaped_ty = mlir::dyn_cast<ShapedType>(src_ty)) {
src_element_ty = src_shaped_ty.getElementType();
dst_ty = src_shaped_ty.clone(src_shaped_ty.getShape(), dst_element_ty);
fp32_ty = src_shaped_ty.clone(src_shaped_ty.getShape(), b.getF32Type());
}
if (src_ty == dst_ty) {
return value;
}
if (src_element_ty.isBF16()) {
return Cast(b, b.create<ma::ExtFOp>(fp32_ty, value), dst_element_ty);
}
if (dst_element_ty.isBF16()) {
if (!src_element_ty.isInteger(8)) {
return b.create<ma::TruncFOp>(dst_ty, Cast(b, value, b.getF32Type()));
}
}
auto src_fp_element_ty = mlir::dyn_cast<mlir::FloatType>(src_element_ty);
auto dst_fp_element_ty = mlir::dyn_cast<mlir::FloatType>(dst_element_ty);
if (src_fp_element_ty && dst_fp_element_ty) {
if (IsFp8Type(src_element_ty)) {
return b.create<mt::FpToFpOp>(dst_ty, value);
}
if (IsFp8Type(dst_element_ty)) {
return b.create<mt::FpToFpOp>(
dst_ty, value,
mt::RoundingModeAttr::get(b.getContext(), mt::RoundingMode::RTNE));
}
if (src_fp_element_ty.getFPMantissaWidth() >
dst_fp_element_ty.getFPMantissaWidth()) {
return b.create<ma::TruncFOp>(dst_ty, value);
} else {
return b.create<ma::ExtFOp>(dst_ty, value);
}
}
if (mlir::isa<mlir::IntegerType>(src_element_ty) &&
mlir::isa<mlir::IntegerType>(dst_element_ty)) {
if (src_element_ty.getIntOrFloatBitWidth() <
dst_element_ty.getIntOrFloatBitWidth()) {
if (src_element_ty.isInteger(1)) {
return b.create<ma::ExtUIOp>(dst_ty, value);
}
return b.create<ma::ExtSIOp>(dst_ty, value);
}
return b.create<ma::TruncIOp>(dst_ty, value);
}
if (mlir::isa<mlir::IntegerType>(src_element_ty) && dst_fp_element_ty) {
if (src_element_ty.isInteger(1)) {
return b.create<ma::UIToFPOp>(dst_ty, value);
}
return b.create<ma::SIToFPOp>(dst_ty, value);
}
if (src_fp_element_ty && mlir::isa<mlir::IntegerType>(dst_element_ty)) {
if (dst_element_ty.isInteger(1)) {
return b.create<ma::CmpFOp>(ma::CmpFPredicate::UNE, value,
ZerosLike(b, value));
}
return b.create<ma::FPToSIOp>(dst_ty, value);
}
LOG(FATAL) << "Type conversion not supported: "
<< llvm_ir::DumpToString(src_element_ty) << " -> "
<< llvm_ir::DumpToString(dst_element_ty);
}
Value Subtract(ImplicitLocOpBuilder& b, ValueRange values) {
if (mlir::isa<mlir::IntegerType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::SubIOp>(values[0], values[1]);
} else {
return b.create<ma::SubFOp>(values[0], values[1]);
}
}
Value Compare(ImplicitLocOpBuilder& b, ValueRange values,
mlir::mhlo::ComparisonDirection direction) {
const Type type = mlir::getElementTypeOrSelf(values[0]);
if (mlir::isa<mlir::IntegerType>(type)) {
    return b.create<ma::CmpIOp>(
        mlir::mhlo::impl::getCmpPredicate<ma::CmpIPredicate>(
            direction, /*is_signed=*/!type.isInteger(1))
            .value(),
        values[0], values[1]);
  }
  return b.create<ma::CmpFOp>(
      mlir::mhlo::impl::getCmpPredicate<ma::CmpFPredicate>(
          direction, /*is_signed=*/true)
          .value(),
      values[0], values[1]);
}
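// NaN-propagating maximum, roughly:
//   isNaN(lhs) || (!isNaN(rhs) && lhs >= rhs) ? lhs : rhs.
// Floats lower directly to arith.maximumf; for other element types the
// self-comparisons are effectively constant, so the select reduces to a
// plain signed max. Minimum below is the mirror image with LE.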
Value Maximum(ImplicitLocOpBuilder& b, const se::DeviceDescription& device_info,
ValueRange values) {
if (mlir::isa<mlir::FloatType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::MaximumFOp>(values);
}
Value lhs_is_nan =
Compare(b, {values[0], values[0]}, mlir::mhlo::ComparisonDirection::NE);
Value rhs_is_not_nan =
Compare(b, {values[1], values[1]}, mlir::mhlo::ComparisonDirection::EQ);
Value lhs_is_ge = Compare(b, values, mlir::mhlo::ComparisonDirection::GE);
return b.create<ma::SelectOp>(
b.create<ma::OrIOp>(lhs_is_nan,
b.create<ma::AndIOp>(rhs_is_not_nan, lhs_is_ge)),
values[0], values[1]);
}
Value Minimum(ImplicitLocOpBuilder& b, const se::DeviceDescription& device_info,
ValueRange values) {
if (mlir::isa<mlir::FloatType>(mlir::getElementTypeOrSelf(values[0]))) {
return b.create<ma::MinimumFOp>(values);
}
Value lhs_is_nan =
Compare(b, {values[0], values[0]}, mlir::mhlo::ComparisonDirection::NE);
Value rhs_is_not_nan =
Compare(b, {values[1], values[1]}, mlir::mhlo::ComparisonDirection::EQ);
Value lhs_is_le = Compare(b, values, mlir::mhlo::ComparisonDirection::LE);
return b.create<ma::SelectOp>(
b.create<ma::OrIOp>(lhs_is_nan,
b.create<ma::AndIOp>(rhs_is_not_nan, lhs_is_le)),
values[0], values[1]);
}
Value Splat(ImplicitLocOpBuilder& b, Value value, ArrayRef<int64_t> shape) {
auto type = mlir::RankedTensorType::get(shape, value.getType());
return b.create<mt::SplatOp>(type, value);
}
using TensorValue = mlir::TypedValue<mlir::RankedTensorType>;
Value Broadcast(ImplicitLocOpBuilder& b, TensorValue value,
ArrayRef<int64_t> shape) {
return b.create<mt::BroadcastOp>(value.getType().clone(shape), value);
}
Value Range(ImplicitLocOpBuilder& b, int32_t limit) {
auto type = mlir::RankedTensorType::get(limit, b.getI32Type());
return b.create<mt::MakeRangeOp>(type, 0, limit);
}
Value AddPtr(ImplicitLocOpBuilder& b, Value ptr, Value offset) {
return b.create<mt::AddPtrOp>(ptr.getType(), ptr, offset);
}
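// Elementwise emission: F32/F64 ops with a libdevice equivalent are lowered
// to external libdevice calls (targeting NVPTX or AMDGCN depending on the
// device); everything else maps onto the inline arith/math ops below.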
absl::StatusOr<Value> EmitElementwise(ImplicitLocOpBuilder& b,
absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const HloInstruction& hlo,
ValueRange inputs) {
if (mlir::getElementTypeOrSelf(inputs[0]).isF32() ||
mlir::getElementTypeOrSelf(inputs[0]).isF64()) {
auto dev_fn_id = GetTargetDeviceFunctionID(hlo.opcode());
if (dev_fn_id.ok()) {
llvm::Triple triple("nvptx64-unknown-unknown");
if (std::holds_alternative<se::RocmComputeCapability>(
device_info.gpu_compute_capability())) {
triple.setTriple("amdgcn-unknown-unknown");
}
      return b.create<mt::ExternElementwiseOp>(
          inputs[0].getType(), inputs, "libdevice", libdevice_path,
          ObtainDeviceFunctionName(dev_fn_id.value(),
                                   hlo.shape().element_type(), triple),
          /*pure=*/true);
}
}
const bool is_integer =
mlir::isa<mlir::IntegerType>(mlir::getElementTypeOrSelf(inputs[0]));
switch (hlo.opcode()) {
case HloOpcode::kCopy:
return inputs[0];
case HloOpcode::kAbs:
if (is_integer) {
return b.create<mm::AbsIOp>(inputs[0]);
}
return b.create<mm::AbsFOp>(inputs[0]);
case HloOpcode::kCeil:
return b.create<mm::CeilOp>(inputs[0]);
case HloOpcode::kFloor:
return b.create<mm::FloorOp>(inputs[0]);
case HloOpcode::kNot:
return b.create<ma::XOrIOp>(inputs[0], OnesLike(b, inputs[0]));
case HloOpcode::kNegate:
return Subtract(b, {ZerosLike(b, inputs[0]), inputs[0]});
case HloOpcode::kConvert: {
TF_ASSIGN_OR_RETURN(Type dst_ty,
TritonType(b, hlo.shape().element_type()));
return Cast(b, inputs[0], dst_ty);
}
case HloOpcode::kAdd:
if (is_integer) {
return b.create<ma::AddIOp>(inputs[0], inputs[1]);
}
return b.create<ma::AddFOp>(inputs[0], inputs[1]);
case HloOpcode::kSubtract:
return Subtract(b, inputs);
case HloOpcode::kMultiply:
if (is_integer) {
return b.create<ma::MulIOp>(inputs[0], inputs[1]);
}
return b.create<ma::MulFOp>(inputs[0], inputs[1]);
case HloOpcode::kMaximum:
return Maximum(b, device_info, inputs);
case HloOpcode::kMinimum:
return Minimum(b, device_info, inputs);
case HloOpcode::kClamp:
return Maximum(
b, device_info,
{Minimum(b, device_info, {inputs[1], inputs[2]}), inputs[0]});
case HloOpcode::kAnd:
return b.create<ma::AndIOp>(inputs[0], inputs[1]);
case HloOpcode::kOr:
return b.create<ma::OrIOp>(inputs[0], inputs[1]);
case HloOpcode::kXor:
return b.create<ma::XOrIOp>(inputs[0], inputs[1]);
case HloOpcode::kDivide:
if (is_integer) {
return b.create<ma::DivSIOp>(inputs[0], inputs[1]);
}
return b.create<ma::DivFOp>(inputs[0], inputs[1]);
case HloOpcode::kCompare:
return Compare(
b, inputs,
mlir::mhlo::symbolizeComparisonDirection(
ComparisonDirectionToString(hlo.comparison_direction()))
.value());
case HloOpcode::kSelect:
return b.create<ma::SelectOp>(
Compare(b, {inputs[0], ZerosLike(b, inputs[0])},
mlir::mhlo::ComparisonDirection::NE),
inputs[1], inputs[2]);
default:
return absl::InvalidArgumentError(
absl::StrCat("Unsupported elementwise operation ", hlo.ToString()));
}
}
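// Loads a parameter value. Tensor pointers without offsets and plain scalar
// pointers load a single element and splat it; genuine tensor pointers load
// with boundary checks, zero-padding out-of-bounds elements when needed.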
Value EmitParameterLoad(ImplicitLocOpBuilder& b, Value pointer,
ArrayRef<int32_t> boundary_checks) {
if (auto make_tensor_ptr = pointer.getDefiningOp<mt::MakeTensorPtrOp>()) {
if (make_tensor_ptr.getOffsets().empty()) {
return Splat(b,
b.create<mt::LoadOp>(make_tensor_ptr.getBase(),
mt::CacheModifier::NONE,
                                        mt::EvictionPolicy::NORMAL,
                                        /*isVolatile=*/false),
{});
}
}
if (mt::isTensorPointerType(pointer.getType())) {
std::optional<mt::PaddingOption> padding;
if (!boundary_checks.empty()) {
padding = mt::PaddingOption::PAD_ZERO;
}
return b.create<mt::LoadOp>(pointer, boundary_checks, padding,
mt::CacheModifier::NONE,
                                mt::EvictionPolicy::NORMAL,
                                /*isVolatile=*/false);
}
return Splat(b,
b.create<mt::LoadOp>(pointer, mt::CacheModifier::NONE,
                                    mt::EvictionPolicy::NORMAL,
                                    /*isVolatile=*/false),
{});
}
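// Scalar constants are widened through a 64-bit literal conversion (U64 or
// S64 for integers, F64 for floats) before CreateConst narrows them to the
// target Triton type.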
absl::StatusOr<Value> EmitConstant(ImplicitLocOpBuilder& b,
const HloInstruction& constant) {
TF_ASSIGN_OR_RETURN(Type ty, TritonType(b, constant.shape().element_type()));
if (constant.shape().IsInteger()) {
if (constant.shape().element_type() == U64) {
return CreateConst(b, ty, ScalarConstantValue<uint64_t>(constant, U64));
} else {
return CreateConst(b, ty, ScalarConstantValue<int64_t>(constant, S64));
}
}
return CreateConst(b, ty, ScalarConstantValue<double>(constant, F64));
}
struct DimProperties {
DimProperties(int64_t index, Value pid, int block_size, int split_value)
: index(index),
pid(pid),
block_size(block_size),
split_value(split_value) {}
int64_t index;
Value pid;
int block_size;
int split_value;
};
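// Expands and broadcasts `input` to the tiled output shape, inserting an
// ExpandDimsOp for each dimension present in the broadcast output but absent
// from its operand; non-tensor (scalar) inputs are simply splatted.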
absl::StatusOr<Value> EmitBroadcast(
ImplicitLocOpBuilder& b, const TritonFusionAnalysis* analysis,
TritonFusionAnalysis::Scope scope,
absl::Span<const DimProperties> tiled_dimensions,
const HloInstruction& broadcast, Value input) {
TF_RET_CHECK(analysis != nullptr);
std::vector<int64_t> out_shape;
for (const DimProperties& dim : tiled_dimensions) {
const TensorIterationSpec::DimIterationSpec* spec =
analysis->IterSpec(scope, &broadcast, dim.index);
if (spec != nullptr && spec->at(0).stride > 0) {
out_shape.push_back(dim.block_size);
}
}
auto tensor_input = mlir::dyn_cast<TensorValue>(input);
if (!tensor_input) {
return Splat(b, input, out_shape);
}
if (tensor_input.getType().getRank() == out_shape.size()) {
return input;
}
Value expanded_input = tensor_input;
int dim_idx = 0;
for (const DimProperties& dim : tiled_dimensions) {
if (analysis->IterSpec(scope, &broadcast, dim.index) != nullptr &&
analysis->IterSpec(scope, &broadcast, dim.index)->at(0).stride > 0) {
if (analysis->IterSpec(scope, broadcast.operand(0), dim.index) ==
nullptr) {
expanded_input = b.create<mt::ExpandDimsOp>(expanded_input, dim_idx);
}
++dim_idx;
}
}
return Broadcast(b, mlir::cast<TensorValue>(expanded_input), out_shape);
}
absl::StatusOr<Value> EmitScope(
ImplicitLocOpBuilder& b, absl::string_view libdevice_path,
const se::DeviceDescription& device_info,
const TritonFusionAnalysis* analysis, TritonFusionAnalysis::Scope scope, | #include "xla/service/gpu/ir_emitter_triton.h"
#include <cstdlib>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "llvm/IR/LLVMContext.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "xla/autotuning.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/gpu/triton_test_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class TritonTest : public GpuCodegenTest {
public:
stream_executor::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
const stream_executor::GpuComputeCapability& GpuComputeComp() {
return device_desc().gpu_compute_capability();
}
stream_executor::GpuComputeCapability CudaAmpereOrRocm() {
if (std::holds_alternative<stream_executor::RocmComputeCapability>(
GpuComputeComp())) {
return stream_executor::GpuComputeCapability{
device_desc().rocm_compute_capability()};
} else {
return stream_executor::GpuComputeCapability{
stream_executor::CudaComputeCapability{
stream_executor::CudaComputeCapability::AMPERE, 0}};
}
}
protected:
const stream_executor::DeviceDescription& device_desc() {
return backend().default_stream_executor()->GetDeviceDescription();
}
};
class TritonGemmTest : public TritonTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(false);
debug_options.set_xla_gpu_enable_split_k_autotuning(false);
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
return debug_options;
}
void MatchHloModule(HloModule& module, absl::string_view pattern) {
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result,
RunFileCheck(module.ToString(), pattern));
EXPECT_TRUE(filecheck_result);
}
};
class TritonGemmTestWithSplitK : public TritonGemmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonGemmTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_split_k_autotuning(true);
return debug_options;
}
};
class TritonGemmTestWithoutTritonGemmAny : public TritonGemmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = TritonGemmTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
return debug_options;
}
};
TEST_F(TritonTest, TestGemm) {
const std::string kHloText = R"(
HloModule t, is_scheduled=true
triton_gemm_r {
parameter_0 = s8[80,115]{1,0} parameter(0)
convert.3 = f32[80,115]{1,0} convert(parameter_0)
parameter_1 = f32[137,115]{1,0} parameter(1)
ROOT r.1 = f32[80,137]{1,0} dot(convert.3, parameter_1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p1 = f32[137,115]{1,0} parameter(1)
p0 = s8[80,115]{1,0} parameter(0)
ROOT triton_gemm_r = f32[80,137]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_gemm_r,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_EXPECT_OK(
CreateTritonIrAndFileCheckForDot(this, kHloText, "triton_gemm_r", R"(
CHECK: tt.func @triton_fn(%[[LHS:.*]]: !tt.ptr<i8> {tt.divisibility = 16 : i32}, %[[RHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[OUT:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[ZERO_KN:.*]] = arith.constant dense<0.000000e+00> : tensor<32x64xf32>
CHECK-DAG: %[[ZERO_MK:.*]] = arith.constant dense<0.000000e+00> : tensor<16x32xf32>
CHECK-DAG: %[[ZERO_MN:.*]] = arith.constant dense<0.000000e+00> : tensor<16x64xf32>
CHECK-DAG: %[[SIZE_K:.*]] = arith.constant 115 : i32
CHECK-DAG: %[[SIZE_M:.*]] = arith.constant 137 : i64
CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i64
CHECK-DAG: %[[C0:.*]] = arith.constant 0 : i32
CHECK-DAG: %[[C80:.*]] = arith.constant 80 : i64
CHECK-DAG: %[[TILE_SIZE_K:.*]] = arith.constant 32 : i32
CHECK-DAG: %[[TILE_SIZE_N:.*]] = arith.constant 64 : i32
CHECK-DAG: %[[TILE_SIZE_M:.*]] = arith.constant 16 : i32
CHECK-DAG: %[[NUM_TILES_M:.*]] = arith.constant 5 : i32
CHECK-DAG: %[[GROUP_M:.*]] = arith.constant 8 : i32
CHECK-DAG: %[[WIDTH:.*]] = arith.constant 24 : i32
CHECK: %[[PID_NC:.*]] = tt.get_program_id x
CHECK: %[[GROUP_ID:.*]] = arith.divsi %[[PID_NC]], %[[WIDTH]]
CHECK: %[[FIRST_PID_M:.*]] = arith.muli %[[GROUP_ID]], %[[GROUP_M]]
CHECK: %[[MAX_M:.*]] = arith.subi %[[NUM_TILES_M]], %[[FIRST_PID_M]]
CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[MAX_M]], %[[GROUP_M]]
CHECK: %[[GROUP_SIZE:.*]] = arith.select %[[CMP]], %[[MAX_M]], %[[GROUP_M]]
CHECK: %[[PID_M:.*]] = arith.remsi %[[PID_NC]], %[[GROUP_SIZE]]
CHECK: %[[TILE_INDEX_M:.*]] = arith.addi %[[FIRST_PID_M]], %[[PID_M]] : i32
CHECK: %[[TMP:.*]] = arith.remsi %[[PID_NC]], %[[WIDTH]] : i32
CHECK: %[[TILE_INDEX_N:.*]] = arith.divsi %[[TMP]], %[[GROUP_SIZE]] : i32
CHECK: %[[TILE_OFFSET_M_LHS:.*]] = arith.muli %[[TILE_INDEX_M]], %[[TILE_SIZE_M]]
CHECK: %[[LHS_PTR:.*]] = tt.make_tensor_ptr %[[LHS]]
CHECK: %[[LHS_TILE_PTR:.*]] = tt.advance %[[LHS_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[C0]]]
CHECK: %[[TILE_OFFSET_N_RHS:.*]] = arith.muli %[[TILE_INDEX_N]], %[[TILE_SIZE_N]]
CHECK: %[[RHS_PTR:.*]] = tt.make_tensor_ptr %[[RHS]]
CHECK: %[[RHS_TILE_PTR:.*]] = tt.advance %[[RHS_PTR]], [%[[C0]], %[[TILE_OFFSET_N_RHS]]]
CHECK: %[[FOR:.*]]:3 = scf.for %[[BLOCK_K:.*]] = %[[C0]] to %[[SIZE_K]] step %[[TILE_SIZE_K]]
CHECK-SAME: iter_args(%[[LHS_ITER_PTR:.*]] = %[[LHS_TILE_PTR]], %[[RHS_ITER_PTR:.*]] = %[[RHS_TILE_PTR]], %[[ACC:.*]] = %[[ZERO_MN]])
CHECK: %[[LHS_TILE:.*]] = tt.load %[[LHS_ITER_PTR]] {boundaryCheck = array<i32: 1>
CHECK: %[[LHS_ITER_PTR_NEXT:.*]] = tt.advance %[[LHS_ITER_PTR]], [%[[C0]], %[[TILE_SIZE_K]]]
CHECK: %[[RHS_TILE:.*]] = tt.load %[[RHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[RHS_ITER_PTR_NEXT:.*]] = tt.advance %[[RHS_ITER_PTR]], [%[[TILE_SIZE_K]], %[[C0]]]
CHECK: %[[CONVERTED:.*]] = arith.sitofp %[[LHS_TILE]] : tensor<16x32xi8> to tensor<16x32xf32>
CHECK: %[[TILE_K_LIMIT:.*]] = arith.subi %[[SIZE_K]], %[[BLOCK_K]] : i32
CHECK: %[[K_TILE_IOTA:.*]] = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32>
CHECK: %[[K_OFFSETS_1K:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 0 : i32} : tensor<32xi32> -> tensor<1x32xi32>
CHECK: %[[TILE_K_LIMIT_1K:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_1K:.*]] = arith.cmpi slt, %[[K_OFFSETS_1K]], %[[TILE_K_LIMIT_1K]] : tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_MK:.*]] = tt.broadcast %[[LHS_INBOUNDS_1K]] : tensor<1x32xi1> -> tensor<16x32xi1>
CHECK: %[[LHS_MASKED:.*]] = arith.select %[[LHS_INBOUNDS_MK]], %[[CONVERTED]], %[[ZERO_MK]]
CHECK: %[[K_OFFSETS_K1:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 1 : i32} : tensor<32xi32> -> tensor<32x1xi32>
CHECK: %[[TILE_K_LIMIT_K1:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_K1:.*]] = arith.cmpi slt, %[[K_OFFSETS_K1]], %[[TILE_K_LIMIT_K1]] : tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_KN:.*]] = tt.broadcast %[[RHS_INBOUNDS_K1]] : tensor<32x1xi1> -> tensor<32x64xi1>
CHECK: %[[RHS_MASKED:.*]] = arith.select %[[RHS_INBOUNDS_KN]], %[[RHS_TILE]], %[[ZERO_KN]] : tensor<32x64xi1>, tensor<32x64xf32>
CHECK: %[[ACC_NEXT:.*]] = tt.dot %[[LHS_MASKED]], %[[RHS_MASKED]], %[[ACC]]
CHECK: scf.yield %[[LHS_ITER_PTR_NEXT]], %[[RHS_ITER_PTR_NEXT]], %[[ACC_NEXT]] : !tt.ptr<tensor<16x32xi8>>, !tt.ptr<tensor<32x64xf32>>, tensor<16x64xf32>
CHECK: }
CHECK: %[[OUT_PTR:.*]] = tt.make_tensor_ptr %[[OUT]], [%[[C80]], %[[SIZE_M]]], [%[[SIZE_M]], %[[C1]]], [%[[C0]], %[[C0]]] {order = array<i32: 1, 0>} : <tensor<16x64xf32>>
CHECK: %[[OUT_OFFSET:.*]] = tt.advance %[[OUT_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[TILE_OFFSET_N_RHS]]] : <tensor<16x64xf32>>
CHECK: tt.store %[[OUT_OFFSET]], %[[FOR]]#2 {boundaryCheck = array<i32: 1>} : !tt.ptr<tensor<16x64xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestGemmWithTrivialNonContractingDimension) {
const std::string kHloText = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":16,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_EXPECT_OK(
CreateTritonIrAndFileCheckForDot(this, kHloText, "triton_dot", R"(
CHECK: tt.func @triton_fn(%[[LHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[RHS:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[OUT:.*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[ZERO_KN:.*]] = arith.constant dense<0.000000e+00> : tensor<32x16xf32>
CHECK-DAG: %[[ZERO_MK:.*]] = arith.constant dense<0.000000e+00> : tensor<16x32xf32>
CHECK-DAG: %[[ZERO_MN:.*]] = arith.constant dense<0.000000e+00> : tensor<16x16xf32>
CHECK-DAG: %[[SIZE_K:.*]] = arith.constant 115 : i32
CHECK-DAG: %[[SIZE_M:.*]] = arith.constant 137 : i64
CHECK-DAG: %[[C1:.*]] = arith.constant 1 : i64
CHECK-DAG: %[[C0:.*]] = arith.constant 0 : i32
CHECK-DAG: %[[C115:.*]] = arith.constant 115 : i64
CHECK-DAG: %[[TILE_SIZE_K:.*]] = arith.constant 32 : i32
CHECK-DAG: %[[TILE_SIZE_M:.*]] = arith.constant 16 : i32
CHECK-DAG: %[[C8:.*]] = arith.constant 8 : i32
CHECK-DAG: %[[NUM_TILES_M:.*]] = arith.constant 9 : i32
CHECK: %[[PID_NC:.*]] = tt.get_program_id x : i32
CHECK: %[[GROUP_ID:.*]] = arith.divsi %[[PID_NC]], %[[C8]]
CHECK: %[[FIRST_PID_M:.*]] = arith.muli %[[GROUP_ID]], %[[C8]]
CHECK: %[[MAX_M:.*]] = arith.subi %[[NUM_TILES_M]], %[[FIRST_PID_M]]
CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[MAX_M]], %[[C8]]
CHECK: %[[GROUP_SIZE:.*]] = arith.select %[[CMP]], %[[MAX_M]], %[[C8]]
CHECK: %[[PID_M:.*]] = arith.remsi %[[PID_NC]], %[[GROUP_SIZE]]
CHECK: %[[TILE_INDEX_M:.*]] = arith.addi %[[FIRST_PID_M]], %[[PID_M]]
CHECK: %[[TMP:.*]] = arith.remsi %[[PID_NC]], %[[C8]]
CHECK: %[[TILE_INDEX_N:.*]] = arith.divsi %[[TMP]], %[[GROUP_SIZE]]
CHECK: %[[TILE_OFFSET_M_LHS:.*]] = arith.muli %[[TILE_INDEX_M]], %[[TILE_SIZE_M]]
CHECK: %[[LHS_PTR:.*]] = tt.make_tensor_ptr %[[LHS]]
CHECK: %[[LHS_TILE_PTR:.*]] = tt.advance %[[LHS_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[C0]]]
CHECK: %[[TILE_OFFSET_N_RHS:.*]] = arith.muli %[[TILE_INDEX_N]], %[[TILE_SIZE_M]]
CHECK: %[[RHS_PTR:.*]] = tt.make_tensor_ptr %[[RHS]]
CHECK: %[[RHS_TILE_PTR:.*]] = tt.advance %[[RHS_PTR]], [%[[C0]], %[[TILE_OFFSET_N_RHS]]]
CHECK: %[[FOR:.*]]:3 = scf.for %[[BLOCK_K:.*]] = %[[C0]] to %[[SIZE_K]] step %[[TILE_SIZE_K]]
CHECK-SAME: iter_args(%[[LHS_ITER_PTR:.*]] = %[[LHS_TILE_PTR]], %[[RHS_ITER_PTR:.*]] = %[[RHS_TILE_PTR]], %[[ACC:.*]] = %[[ZERO_MN]])
CHECK: %[[LHS_TILE:.*]] = tt.load %[[LHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[LHS_ITER_PTR_NEXT:.*]] = tt.advance %[[LHS_ITER_PTR]], [%[[C0]], %[[TILE_SIZE_K]]]
CHECK: %[[RHS_TILE:.*]] = tt.load %[[RHS_ITER_PTR]] {boundaryCheck = array<i32: 0, 1>
CHECK: %[[RHS_ITER_PTR_NEXT:.*]] = tt.advance %[[RHS_ITER_PTR]], [%[[TILE_SIZE_K]], %[[C0]]]
CHECK: %[[TILE_K_LIMIT:.*]] = arith.subi %[[SIZE_K]], %[[BLOCK_K]] : i32
CHECK: %[[K_TILE_IOTA:.*]] = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32>
CHECK: %[[K_OFFSETS_1K:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 0 : i32} : tensor<32xi32> -> tensor<1x32xi32>
CHECK: %[[TILE_K_LIMIT_1K:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_1K:.*]] = arith.cmpi slt, %[[K_OFFSETS_1K]], %[[TILE_K_LIMIT_1K]] : tensor<1x32xi32>
CHECK: %[[LHS_INBOUNDS_MK:.*]] = tt.broadcast %[[LHS_INBOUNDS_1K]] : tensor<1x32xi1> -> tensor<16x32xi1>
CHECK: %[[LHS_MASKED:.*]] = arith.select %[[LHS_INBOUNDS_MK]], %[[LHS_TILE]], %[[ZERO_MK]]
CHECK: %[[K_OFFSETS_K1:.*]] = tt.expand_dims %[[K_TILE_IOTA]] {axis = 1 : i32} : tensor<32xi32> -> tensor<32x1xi32>
CHECK: %[[TILE_K_LIMIT_K1:.*]] = tt.splat %[[TILE_K_LIMIT]] : i32 -> tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_K1:.*]] = arith.cmpi slt, %[[K_OFFSETS_K1]], %[[TILE_K_LIMIT_K1]] : tensor<32x1xi32>
CHECK: %[[RHS_INBOUNDS_KN:.*]] = tt.broadcast %[[RHS_INBOUNDS_K1]] : tensor<32x1xi1> -> tensor<32x16xi1>
CHECK: %[[RHS_MASKED:.*]] = arith.select %[[RHS_INBOUNDS_KN]], %[[RHS_TILE]], %[[ZERO_KN]] : tensor<32x16xi1>, tensor<32x16xf32>
CHECK: %[[ACC_NEXT:.*]] = tt.dot %[[LHS_MASKED]], %[[RHS_MASKED]], %[[ACC]]
CHECK: scf.yield %[[LHS_ITER_PTR_NEXT]], %[[RHS_ITER_PTR_NEXT]], %[[ACC_NEXT]] : !tt.ptr<tensor<16x32xf32>>, !tt.ptr<tensor<32x16xf32>>, tensor<16x16xf32>
CHECK: }
CHECK: %[[OUT_PTR:.*]] = tt.make_tensor_ptr %[[OUT]], [%[[SIZE_M]], %[[C1]]], [%[[C1]], %[[C1]]], [%[[C0]], %[[C0]]] {order = array<i32: 1, 0>} : <tensor<16x16xf32>>
CHECK: %[[OUT_OFFSET:.*]] = tt.advance %[[OUT_PTR]], [%[[TILE_OFFSET_M_LHS]], %[[TILE_OFFSET_N_RHS]]] : <tensor<16x16xf32>>
CHECK: tt.store %[[OUT_OFFSET]], %[[FOR]]#2 {boundaryCheck = array<i32: 0, 1>} : !tt.ptr<tensor<16x16xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithSingleParameter) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
parameter_0 = f32[125,127]{1,0} parameter(0)
multiply_0 = f32[125,127]{1,0} multiply(parameter_0, parameter_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
})";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK: %[[PID:.*]] = tt.get_program_id x : i32
CHECK: arith.index_castui %[[PID]] : i32 to index
CHECK: tt.addptr %[[P0]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG2:[^:]*]]: f32, %[[ARG3:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG2]], %[[ARG3]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: arith.mulf
CHECK-SAME: tensor<128xf32>
CHECK: tt.addptr %[[P1]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithSingleScalarParameter) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
parameter_0 = f32[] parameter(0)
broadcast_1 = f32[125,127]{1,0} broadcast(parameter_0), dimensions={}
multiply_0 = f32[125,127]{1,0} multiply(broadcast_1, broadcast_1)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[] constant(42)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
})";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK-DAG: %[[ARG_0:.*]] = tt.addptr %[[P0]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK: tt.load %[[ARG_0]] : !tt.ptr<f32>
CHECK-NEXT: tt.splat
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG2:[^:]*]]: f32, %[[ARG3:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG2]], %[[ARG3]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: arith.mulf
CHECK-SAME: tensor<128xf32>
CHECK: tt.addptr %[[P1]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithMultipleParameters) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest, TestSoftmaxEmitterWithMultipleParametersOrderSwapped) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_1, param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: tt.splat
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest,
TestSoftmaxEmitterWithAdditionalParameterEnteringAfterDiamond) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(param_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
param_1 = f32[127]{0} parameter(1)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
ROOT multiply_0 = f32[125,127]{1,0} multiply(broadcast_4, broadcast_0)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.reduce
CHECK-NEXT: ^bb0(%[[ARG3:[^:]*]]: f32, %[[ARG4:[^:]*]]: f32):
CHECK-NEXT: %[[ADD:.*]] = arith.addf %[[ARG3]], %[[ARG4]] : f32
CHECK-NEXT: tt.reduce.return %[[ADD]] : f32
CHECK-NEXT: }) : (tensor<128xf32>) -> f32
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: tt.addptr %[[P2]]
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.store
CHECK-SAME: {boundaryCheck = array<i32: 0>} : !tt.ptr<tensor<128xf32>>
CHECK: tt.return
CHECK: }
)"));
}
TEST_F(TritonTest,
TestSoftmaxEmitterWithMultipleParametersAlongTiledDimension) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[125,127]{1,0} parameter(0)
param_1 = f32[127]{0} parameter(1)
param_2 = f32[125]{0} parameter(2)
broadcast_0 = f32[125,127]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[125,127]{1,0} multiply(param_0, broadcast_0)
broadcast_1 = f32[125,127]{1,0} broadcast(param_2), dimensions={0}
multiply_1 = f32[125,127]{1,0} multiply(multiply_0, broadcast_1)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_1, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_1, broadcast_4)
}
ENTRY main {
param_0 = f32[125,127]{1,0} parameter(1)
param_1 = f32[127]{0} parameter(0)
param_2 = f32[125]{0} parameter(2)
ROOT triton_softmax = f32[125,127]{1,0} fusion(param_0, param_1, param_2), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)";
TF_EXPECT_OK(CreateTritonIrAndFileCheck(this, kHloText,
FromOutputTileSizes({1, 127}),
"triton_softmax_computation", R"(
CHECK: #[[MAP:.*]] = affine_map<(d0) -> (d0 * 127)>
CHECK: tt.func @triton_fn(%[[P0:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P1:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P2:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}, %[[P3:[^:]*]]: !tt.ptr<f32> {tt.divisibility = 16 : i32}) {
CHECK-DAG: %[[C127_i64:.*]] = arith.constant 127 : i64
CHECK-DAG: %[[ZERO_OFFSET:.*]] = arith.constant 0 : i64
CHECK-DAG: %[[PID:.*]] = tt.get_program_id x : i32
CHECK-DAG: %[[PID_INDEX:.*]] = arith.index_castui %[[PID]] : i32 to index
CHECK: %[[ROW_OFFSET_INDEX:.*]] = xla_gpu.apply_indexing #[[MAP]](%[[PID_INDEX]]
CHECK: %[[ROW_OFFSET:.*]] = arith.index_castui %[[ROW_OFFSET_INDEX]] : index to i64
CHECK: %[[ARG0:.*]] = tt.addptr %[[P0]], %[[ROW_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: tt.load
CHECK-SAME: {boundaryCheck = array<i32: 0>, padding = 1 : i32} : !tt.ptr<tensor<128xf32>>
CHECK: %[[ARG1:.*]] = tt.addptr %[[P1]], %[[ZERO_OFFSET]] : !tt.ptr<f32>, i64
CHECK-NEXT: tt.make_tensor_ptr
CHECK-SAME: <tensor<128xf32>>
CHECK-NEXT: | 2,050 |
#ifndef XLA_SERVICE_GPU_TRITON_TILING_PROPAGATION_H_
#define XLA_SERVICE_GPU_TRITON_TILING_PROPAGATION_H_
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
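// Describes how a tensor is iterated during tiling: for every logical
// dimension, an ordered list of fragments, each recording the stride, element
// count, and optional slicing of one contiguous piece of that dimension in
// physical memory.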
class TensorIterationSpec {
public:
struct IterationSpecFragment {
int64_t stride;
int64_t count;
int64_t slice_start;
int64_t sliced_count;
std::vector<int64_t> subfragments;
bool is_sliced() const { return count != sliced_count; }
auto ToTuple() const {
return std::make_tuple(stride, count, slice_start, sliced_count,
subfragments);
}
bool operator==(const IterationSpecFragment& other) const {
return ToTuple() == other.ToTuple();
}
template <typename H>
friend H AbslHashValue(H h, const IterationSpecFragment& fragment) {
return H::combine(std::move(h), fragment.ToTuple());
}
bool IsPhysicallyEquivalent(const IterationSpecFragment& other) const {
return stride == other.stride && count == other.count &&
slice_start == other.slice_start &&
sliced_count == other.sliced_count;
}
std::string ToString() const;
};
using DimIterationSpec = std::vector<IterationSpecFragment>;
const DimIterationSpec& operator[](const int dimension) const {
return dim_iteration_specs_.at(dimension);
}
DimIterationSpec& operator[](const int dimension) {
return dim_iteration_specs_[dimension];
}
const DimIterationSpec* Find(int dimension) const;
std::vector<int> GetDimensions() const;
void RemoveEmptyDimensions() {
absl::erase_if(dim_iteration_specs_,
[](const auto& it) { return it.second.empty(); });
}
bool operator==(const TensorIterationSpec& other) const {
return dim_iteration_specs_ == other.dim_iteration_specs_;
}
template <typename H>
friend H AbslHashValue(H h, const TensorIterationSpec& spec) {
return H::combine(std::move(h), spec.dim_iteration_specs_);
}
bool IsPhysicallyEquivalent(const TensorIterationSpec& other) const;
std::string ToString() const;
private:
absl::flat_hash_map<int, DimIterationSpec> dim_iteration_specs_;
};
namespace triton_fusion {
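// Describes the physical layout of a tensor during tiling propagation: an
// ordered (minor-to-major) list of fragments of logical dimensions, plus, for
// every logical dimension, the order of its fragments within the tensor.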
class DimensionOrder {
public:
static DimensionOrder FromDotOperandOrOutput(
const HloInstruction& hlo, int split_k_dimension_index = -1);
class Fragment {
public:
explicit Fragment(int dst_dim_number, int64_t count)
: dst_dim_number_(dst_dim_number),
count_(count),
slice_start_(0),
sliced_count_(count) {}
std::string ToString() const;
int dst_dim_number() const { return dst_dim_number_; }
int64_t full_count() const { return count_; }
int64_t slice_start() const { return slice_start_; }
int64_t sliced_count() const { return sliced_count_; }
bool is_sliced() const { return count_ != sliced_count_; }
void set_slice(int64_t start, int64_t count) {
slice_start_ = start;
sliced_count_ = count;
}
void set_count(int64_t count) { count_ = count; }
private:
const int dst_dim_number_;
int64_t count_;
int64_t slice_start_;
int64_t sliced_count_;
};
using Fragments = std::vector<Fragment>;
using FragmentOrders = absl::flat_hash_map<int, std::vector<int>>;
const Fragments& TensorFragmentsOrder() const {
return tensor_fragments_order_;
}
Fragments& TensorFragmentsOrder() { return tensor_fragments_order_; }
const FragmentOrders& DimFragmentsOrders() const {
return dim_fragments_orders_;
}
FragmentOrders& DimFragmentsOrders() { return dim_fragments_orders_; }
std::string ToString() const;
TensorIterationSpec ToTensorIterationSpec() const;
bool IsPhysicallyEquivalent(const DimensionOrder& other) const {
return ToTensorIterationSpec().IsPhysicallyEquivalent(
other.ToTensorIterationSpec());
}
private:
Fragments tensor_fragments_order_;
FragmentOrders dim_fragments_orders_;
};
inline constexpr int kNoDimensionIndex = -1;
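// Dot-related properties consumed by the propagation logic: the index of the
// non-contracting dimension of the operand being analyzed and, when a
// dimension may be split (e.g. for split-K), the index of that splittable
// dimension (kNoDimensionIndex when there is none).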
struct DotProperties {
const int noncontracting_dimension;
const int splittable_dimension_index;
};
inline constexpr int kNoSplitRequirement = 1;
struct DotRequirements {
explicit DotRequirements(int64_t splittable_dimension_major_part_size)
: splittable_dimension_major_part_size(
splittable_dimension_major_part_size) {
CHECK_GE(splittable_dimension_major_part_size, 1);
}
int64_t splittable_dimension_major_part_size;
};
using DotRequirementsOrError = std::variant<DotRequirements, FusionDecision>;
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error);
enum class TransformDirection { kInputToOutput, kOutputToInput };
using DimOrderMap = absl::flat_hash_map<const HloInstruction*, DimensionOrder>;
using DimOrderMapOrError = std::variant<DimOrderMap, FusionDecision>;
struct DimOrdersAndReqs {
DimOrderMap dim_orders;
DotRequirements requirements;
};
using DimOrdersAndReqsOrError = std::variant<DimOrdersAndReqs, FusionDecision>;
DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(
const HloInstruction& hlo, const DimensionOrder& src_dim_order,
TransformDirection direction, const DotProperties& properties);
DimOrdersAndReqsOrError
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
const HloInstruction& hlo, TransformDirection transform_direction,
const std::optional<int>& src_operand_index,
const DimensionOrder& src_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties);
}
}
}
#endif
#include "xla/service/gpu/triton_tiling_propagation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
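// Drops dimensions whose iteration spec is trivial (a single fragment
// covering one element); such dimensions have no effect on the physical
// layout, so they are ignored when comparing specs for equivalence.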
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
FilterTrivialDims(
const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&
dim_iter_specs) {
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
non_trivial_dim_iteration_specs;
for (const auto& [dim, dim_spec] : dim_iter_specs) {
if (dim_spec.size() == 1 && dim_spec[0].count == 1) {
continue;
}
non_trivial_dim_iteration_specs[dim] = dim_spec;
}
return non_trivial_dim_iteration_specs;
}
}
const TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(
const int dimension) const {
if (auto it = dim_iteration_specs_.find(dimension);
it != dim_iteration_specs_.end()) {
return &it->second;
}
return nullptr;
}
std::vector<int> TensorIterationSpec::GetDimensions() const {
std::vector<int> result;
result.reserve(dim_iteration_specs_.size());
for (const auto& [dim, _] : dim_iteration_specs_) {
result.push_back(dim);
}
return result;
}
bool TensorIterationSpec::IsPhysicallyEquivalent(
const TensorIterationSpec& other) const {
const absl::flat_hash_map<int, DimIterationSpec>
non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);
const absl::flat_hash_map<int, DimIterationSpec>
other_non_trivial_dim_iteration_specs =
FilterTrivialDims(other.dim_iteration_specs_);
if (non_trivial_dim_iteration_specs.size() !=
other_non_trivial_dim_iteration_specs.size()) {
return false;
}
for (const auto& pair : non_trivial_dim_iteration_specs) {
int dimension = pair.first;
const DimIterationSpec& dim_iter_spec = pair.second;
auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);
if (other_it == other_non_trivial_dim_iteration_specs.end()) {
return false;
}
const DimIterationSpec& other_dim_iter_spec = other_it->second;
if (dim_iter_spec.size() != other_dim_iter_spec.size()) {
return false;
}
for (size_t i = 0; i < dim_iter_spec.size(); i++) {
if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {
return false;
}
}
}
return true;
}
std::string TensorIterationSpec::IterationSpecFragment::ToString() const {
return absl::StrCat("{stride=", stride, ", count=", count,
", slice_start=", slice_start,
", sliced_count=", sliced_count, ", subfragments=[",
absl::StrJoin(subfragments, ", "), "]}");
}
std::string TensorIterationSpec::ToString() const {
return absl::StrCat(
"{",
absl::StrJoin(dim_iteration_specs_, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, kv.first, ": ", "[",
absl::StrJoin(kv.second, ", ",
[&](std::string* ss, const auto& v) {
absl::StrAppend(ss, v.ToString());
}),
"]");
}),
"}");
}
namespace triton_fusion {
using Fragment = DimensionOrder::Fragment;
using Fragments = DimensionOrder::Fragments;
using FragmentOrders = DimensionOrder::FragmentOrders;
DimensionOrder DimensionOrder::FromDotOperandOrOutput(
const HloInstruction& hlo, const int split_k_dimension_index) {
DimensionOrder dim_order;
dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());
for (const int i : hlo.shape().layout().minor_to_major()) {
int target_dim_number = i;
if (i == split_k_dimension_index) {
CHECK(!dim_order.tensor_fragments_order_.empty())
<< "The split-K batch dimension has be preceded by the contracting "
"dimension it originates from by construction.";
target_dim_number =
dim_order.tensor_fragments_order_.back().dst_dim_number();
}
dim_order.dim_fragments_orders_[target_dim_number].push_back(
dim_order.tensor_fragments_order_.size());
dim_order.tensor_fragments_order_.push_back(
Fragment{target_dim_number, hlo.shape().dimensions(i)});
}
return dim_order;
}
std::string DimensionOrder::Fragment::ToString() const {
return absl::StrCat(dst_dim_number_, ":", count_, ":", slice_start_, "-",
sliced_count_);
}
std::string DimensionOrder::ToString() const {
std::string ret = absl::StrJoin(tensor_fragments_order_, " - ",
[](std::string* out, const Fragment& f) {
absl::StrAppend(out, f.ToString(), " ");
});
absl::StrAppend(&ret, "|");
for (const auto& [dim, fragments] : dim_fragments_orders_) {
absl::StrAppend(&ret, dim, ":", absl::StrJoin(fragments, ","), " ");
}
return ret;
}
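// Converts the fragment ordering into per-dimension iteration specs by
// walking the fragments minor-to-major, accumulating strides, and merging
// consecutive fragments that belong to the same logical dimension.
//
// Illustrative example (hypothetical values): fragments
// (dim 0, count 2), (dim 0, count 3), (dim 1, count 4) produce
//   dim 0 -> {stride=1, count=6, subfragments=[2, 3]}
//   dim 1 -> {stride=6, count=4, subfragments=[4]}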
TensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {
const Fragments& dim_fragments = TensorFragmentsOrder();
TensorIterationSpec tensor_spec;
int64_t accumulated_stride = 1;
int last_dim = -1;
for (int dim_order_index = 0; dim_order_index < dim_fragments.size();
++dim_order_index) {
const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];
VLOG(6) << fragment.ToString();
TensorIterationSpec::DimIterationSpec& dim_spec =
tensor_spec[fragment.dst_dim_number()];
if (last_dim == fragment.dst_dim_number()) {
if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&
dim_spec.back().subfragments.back() == 1) {
dim_spec.back().subfragments.pop_back();
}
if (fragment.full_count() > 1) {
CHECK(!dim_spec.empty());
CHECK(!dim_spec.back().is_sliced())
<< "Only the major-most fragment can have an offset.";
dim_spec.back().slice_start =
fragment.slice_start() * dim_spec.back().count;
dim_spec.back().sliced_count =
fragment.sliced_count() * dim_spec.back().count;
dim_spec.back().count *= fragment.full_count();
dim_spec.back().subfragments.push_back(fragment.sliced_count());
}
} else {
dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{
accumulated_stride,
fragment.full_count(),
fragment.slice_start(),
fragment.sliced_count(),
{fragment.sliced_count()}});
}
accumulated_stride *= fragment.full_count();
last_dim = fragment.dst_dim_number();
}
for (int dim_idx : tensor_spec.GetDimensions()) {
TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];
if (dim_spec.size() <= 1) continue;
TensorIterationSpec::DimIterationSpec filtered_dim_spec;
absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),
[](const TensorIterationSpec::IterationSpecFragment& f) {
return f.count != 1;
});
tensor_spec[dim_idx] = filtered_dim_spec;
}
tensor_spec.RemoveEmptyDimensions();
return tensor_spec;
}
namespace {
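// Returns the logical index of the dimension of `shape` that contains the
// fragment labeled `label` in `dim_order`, or std::nullopt if none does.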
std::optional<int> LogicalIndexOfLabeledDimension(
const Shape& shape, const DimensionOrder& dim_order, const int label) {
auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();
for (int dim : shape.layout().minor_to_major()) {
const int64_t dim_size = shape.dimensions()[dim];
int64_t fragments_size = 1;
while (fragments_size < dim_size) {
fragments_size *= fragment_it->full_count();
if (fragment_it->dst_dim_number() == label) {
return dim;
}
++fragment_it;
}
}
return std::nullopt;
}
using Int64OrError = std::variant<int64_t, FusionDecision>;
Int64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {
if (a == b || b == kNoSplitRequirement) {
return a;
}
if (a == kNoSplitRequirement) {
return b;
}
return FusionDecision("Conflicting splits of splittable dimension");
}
}
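// Merges two sets of dot requirements, returning a FusionDecision when they
// demand conflicting non-trivial splits of the splittable dimension.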
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error) {
if (std::holds_alternative<FusionDecision>(b_or_error)) {
return b_or_error;
}
const DotRequirements& b = std::get<DotRequirements>(b_or_error);
Int64OrError combined_size_req =
CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,
b.splittable_dimension_major_part_size);
if (std::holds_alternative<FusionDecision>(combined_size_req)) {
return std::get<FusionDecision>(combined_size_req);
}
return DotRequirements(std::get<int64_t>(combined_size_req));
}
namespace {
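// Checks whether a dimension order is expressible as a Triton tile: within
// each logical dimension the fragments must appear in order (no transposes),
// only the major-most fragment may be sliced, and at most one split is
// allowed, and only of the splittable dimension.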
DotRequirementsOrError GetRequirementsIfSupportedOrder(
const DimensionOrder& order, const DotProperties& properties) {
VLOG(8) << order.ToString();
int64_t split_dim_major_part = kNoSplitRequirement;
const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();
for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {
CHECK(!dim_fragments.empty());
for (int i = 0; i < dim_fragments.size() - 1; ++i) {
if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {
return "Sliced non-major-most fragment.";
}
}
int group_counter = 0;
int last_seen_group_last_fragment_index = -1;
auto fragment_it = dim_fragments.cbegin();
while (true) {
if (fragment_it == dim_fragments.cend()) {
break;
}
int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();
while ((fragment_it + 1) != dim_fragments.cend() &&
*(fragment_it + 1) == *fragment_it + 1) {
++fragment_it;
grouped_size *= tensor_dim_fragments[*fragment_it].full_count();
}
if (grouped_size == 1) {
++fragment_it;
continue;
}
if (last_seen_group_last_fragment_index > *fragment_it) {
return "Transpose within a dimension.";
}
++group_counter;
if (group_counter > 1) {
const int splittable_dimension_index =
properties.splittable_dimension_index;
if (dim_index == splittable_dimension_index) {
if (group_counter == 2) {
if (split_dim_major_part != kNoSplitRequirement &&
split_dim_major_part != grouped_size) {
return "Conflicting splits of splittable dimension";
}
split_dim_major_part = grouped_size;
} else if (group_counter > 2) {
return "2nd split of a splittable dimension.";
}
} else {
return "Unsupported split of a dimension.";
}
}
last_seen_group_last_fragment_index = *fragment_it;
++fragment_it;
}
}
return DotRequirements(split_dim_major_part);
}
DotRequirementsOrError GetRequirementsIfSupportedOrders(
const HloInstruction& hlo, const DimOrderMap& dim_orders,
const DotProperties& properties) {
const DotRequirements empty_requirements(kNoSplitRequirement);
auto get_requirements =
[&](const HloInstruction& instr) -> DotRequirementsOrError {
if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {
return GetRequirementsIfSupportedOrder(it->second, properties);
}
return empty_requirements;
};
DotRequirements requirements = empty_requirements;
for (const HloInstruction* operand : hlo.operands()) {
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements, get_requirements(*operand));
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return requirements_or_error;
}
requirements = std::get<DotRequirements>(requirements_or_error);
}
return CombineDotRequirements(requirements, get_requirements(hlo));
}
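// Elementwise ops preserve layout, so the source dimension order is simply
// copied to every instruction on the other side of the transform.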
DimOrderMap GetPropagatedDimOrdersForElementwise(
const HloInstruction& hlo, TransformDirection direction,
const DimensionOrder& src_dim_order) {
if (direction == TransformDirection::kOutputToInput) {
DimOrderMap map;
for (const HloInstruction* operand : hlo.operands()) {
map.insert({operand, src_dim_order});
}
return map;
}
return {{&hlo, src_dim_order}};
}
const HloInstruction& GetSourceHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_GE(hlo.operand_count(), 1);
if (direction == TransformDirection::kOutputToInput) {
return hlo;
}
return *hlo.operand(0);
}
using ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;
ConstInstructionVector GetDestHlos(const HloInstruction& hlo,
TransformDirection direction) {
if (direction == TransformDirection::kInputToOutput) {
return {&hlo};
}
ConstInstructionVector hlos;
hlos.reserve(hlo.operands().size());
for (const HloInstruction* operand : hlo.operands()) {
hlos.push_back(operand);
}
return hlos;
}
const HloInstruction& GetDestHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_EQ(hlo.operand_count(), 1);
if (direction == TransformDirection::kInputToOutput) {
return hlo;
}
return *hlo.operand(0);
}
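// Propagates a dimension order through a bitcast by splitting and merging
// source fragments until their sizes line up with the destination shape's
// dimensions, returning "Unsupported bitcast" when the fragment boundaries
// cannot be reconciled.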
DimOrderMapOrError GetPropagatedDimOrdersForBitcast(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
const HloInstruction& dst = GetDestHlo(hlo, direction);
const Shape& dst_shape = dst.shape();
const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();
DimOrderMap dst_dim_orders;
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
int64_t dst_remaining_size = 1;
absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;
auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();
const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();
for (auto src_dim = src_fragments_order.cbegin();
src_dim != src_fragments_order.cend(); ++src_dim) {
auto add_new_fragment = [&](const Fragment& fragment) {
dst_fragments_order.push_back(fragment);
src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);
};
if (dst_remaining_size >= src_dim->full_count()) {
if (dst_remaining_size % src_dim->full_count()) {
return "Unsupported bitcast";
}
add_new_fragment(*src_dim);
dst_remaining_size /= src_dim->full_count();
} else {
int64_t src_remaining_size = src_dim->full_count();
if (dst_remaining_size > 1) {
if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {
return "Unsupported bitcast";
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), dst_remaining_size});
src_remaining_size /= dst_remaining_size;
dst_remaining_size = 1;
}
while (src_remaining_size > 1) {
CHECK(dst_dim_it != dst_dim_end);
int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);
int64_t new_fragment_size = dst_dim_size;
if (dst_dim_size > src_remaining_size) {
if (dst_dim_size % src_remaining_size) {
return "Unsupported bitcast";
}
dst_remaining_size = dst_dim_size / src_remaining_size;
new_fragment_size = src_remaining_size;
}
if (src_dim->is_sliced()) {
return "Unsupported bitcast";
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), new_fragment_size});
src_remaining_size /= new_fragment_size;
++dst_dim_it;
}
}
}
CHECK_EQ(dst_remaining_size, 1);
while (dst_dim_it != dst_dim_end) {
if (dst_shape.dimensions(*dst_dim_it) != 1) {
return "Unsupported bitcast";
}
if (!dst_fragments_order.empty()) {
dst_fragments_order.push_back(
Fragment{dst_fragments_order.back().dst_dim_number(), 1});
src_to_dst[&src_fragments_order.back()].push_back(
dst_fragments_order.size() - 1);
}
++dst_dim_it;
}
FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
std::vector<int>& dst = dst_dim_fragment_orders[dim_index];
dst.reserve(dim_sequence.size());
for (const int src : dim_sequence) {
std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),
src_to_dst[&src_fragments_order[src]].cend(),
std::back_inserter(dst));
}
}
return dst_dim_orders;
}
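// Handles shape-altering ops such as slices by remapping each source
// fragment onto the corresponding physical dimensions of the destination.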
DimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
std::list<Fragment> new_fragments;
const HloInstruction& src = GetSourceHlo(hlo, direction);
Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();
if (hlo.opcode() == HloOpcode::kSlice &&
ShapeUtil::IsEffectiveScalar(hlo.shape())) {
return FusionDecision("Slice to scalar is not implemented yet.");
}
std::vector<std::vector<Fragment*>> src_physical;
src_physical.reserve(src.shape().rank());
if (src_fragments_order.size() < src.shape().rank()) { | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <vector>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla::gpu {
namespace {
using TritonTilingPropagationTest = HloTestBase;
using triton_fusion::DimensionOrder;
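// Test helper: builds a DimensionOrder directly from a list of fragments,
// registering each fragment's index under its logical dimension.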
DimensionOrder FromFragments(DimensionOrder::Fragments fragments) {
DimensionOrder dim_order;
DimensionOrder::Fragments& tensor_fragments_order =
dim_order.TensorFragmentsOrder();
DimensionOrder::FragmentOrders& dim_fragments_orders =
dim_order.DimFragmentsOrders();
for (const DimensionOrder::Fragment& fragment : fragments) {
    tensor_fragments_order.push_back(fragment);
    // Record the index of the fragment just appended (size() - 1), matching
    // DimensionOrder::FromDotOperandOrOutput, which stores each fragment's
    // position within the tensor fragment order.
    dim_fragments_orders[fragment.dst_dim_number()].push_back(
        tensor_fragments_order.size() - 1);
}
return dim_order;
}
TEST_F(
TritonTilingPropagationTest,
DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
DimensionOrder::Fragment fragment_1(0, 97);
DimensionOrder::Fragment fragment_2(0, 1);
DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});
DimensionOrder::Fragment fragment_3(0, 97);
DimensionOrder::Fragment fragment_4(1, 1);
DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});
EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));
}
TEST_F(
TritonTilingPropagationTest,
IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
TensorIterationSpec::IterationSpecFragment fragment_1 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec spec_1;
spec_1[0].push_back(fragment_1);
TensorIterationSpec::IterationSpecFragment fragment_2 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec::IterationSpecFragment fragment_3 = {
97, 1, 0, 1,
{1}};
TensorIterationSpec spec_2;
spec_2[0].push_back(fragment_2);
spec_2[1].push_back(fragment_3);
EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));
}
TEST_F(TritonTilingPropagationTest,
DimensionsShouldNotBeRemovedByToTensorIterationSpec) {
DimensionOrder::Fragment fragment_0(0, 97);
DimensionOrder::Fragment fragment_1(1, 1);
DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});
TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();
const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);
EXPECT_NE(dim_spec_0, nullptr);
EXPECT_EQ(dim_spec_0->size(), 1);
EXPECT_EQ(dim_spec_0->at(0).count, 97);
const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);
EXPECT_NE(dim_spec_1, nullptr);
EXPECT_EQ(dim_spec_1->size(), 1);
EXPECT_EQ(dim_spec_1->at(0).count, 1);
}
}
} | 2,051 |
#ifndef XLA_SERVICE_GPU_REDUCTION_DEGENERATE_DIM_REMOVER_H_
#define XLA_SERVICE_GPU_REDUCTION_DEGENERATE_DIM_REMOVER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
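// Rewrites reductions whose inputs have degenerate (size-1) dimensions: the
// inputs are bitcast to shapes without those dimensions, the reduction is
// performed in that canonical form, and the result is bitcast back. For
// example, reducing f32[1,3,1,4,1,5,1] over {1,3,5} becomes a reduction of
// f32[3,4,5] over {0,1,2} wrapped in bitcasts.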
class ReductionDegenerateDimRemover : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-degenerate-dim-remover";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
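// Applies the rewrite described in the header to one reduce instruction:
// bitcast away degenerate dimensions on every input, emit a canonical
// reduce, and bitcast each output back to its original shape.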
class ReductionDegenerateDimRemoverVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleReduce(HloInstruction *hlo) override {
auto instr = Cast<HloReduceInstruction>(hlo);
absl::InlinedVector<HloInstruction *, 2> input_reshapes;
absl::InlinedVector<Shape, 2> canonical_reduce_shapes;
int idx = -1;
std::vector<int64_t> updated_reduced_dimensions;
for (HloInstruction *reduced_op : instr->inputs()) {
idx++;
const Shape &input_shape = reduced_op->shape();
const Shape &reduce_shape = instr->shape().IsTuple()
? instr->shape().tuple_shapes(idx)
: instr->shape();
if (!ShapeUtil::HasDegenerateDimensions(reduced_op->shape())) {
return absl::OkStatus();
}
Shape canonical_input_shape =
ShapeUtil::DropDegenerateDimensions(input_shape);
Shape canonical_reduce_shape =
ShapeUtil::DropDegenerateDimensions(reduce_shape);
auto reduced_dimensions = instr->dimensions();
int64_t shift = 0;
for (int dim = 0; dim < input_shape.rank(); dim++) {
if (input_shape.dimensions(dim) == 1) {
shift++;
} else {
if (absl::c_linear_search(reduced_dimensions, dim) && idx == 0) {
updated_reduced_dimensions.push_back(dim - shift);
}
}
}
if (updated_reduced_dimensions.empty()) {
std::unique_ptr<HloInstruction> reshape =
HloInstruction::CreateBitcast(reduce_shape, reduced_op);
return ReplaceWithNewInstruction(instr, std::move(reshape));
}
input_reshapes.push_back(instr->parent()->AddInstruction(
HloInstruction::CreateBitcast(canonical_input_shape, reduced_op)));
canonical_reduce_shapes.push_back(canonical_reduce_shape);
}
Shape canonical_reduce_shape =
ShapeUtil::MakeMaybeTupleShape(canonical_reduce_shapes);
const Shape &orig_reduce_shape = instr->shape();
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
canonical_reduce_shape, input_reshapes, instr->init_values(),
updated_reduced_dimensions, instr->to_apply());
instr->SetupDerivedInstruction(new_reduce.get());
if (canonical_reduce_shape != instr->shape()) {
HloInstruction *wrapped_reduce =
instr->parent()->AddInstruction(std::move(new_reduce));
absl::InlinedVector<HloInstruction *, 2> out;
if (!canonical_reduce_shape.IsTuple()) {
new_reduce =
HloInstruction::CreateBitcast(orig_reduce_shape, wrapped_reduce);
} else {
for (int oidx = 0; oidx < instr->input_count(); oidx++) {
HloInstruction *gte = instr->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
out.push_back(
instr->parent()->AddInstruction(HloInstruction::CreateBitcast(
orig_reduce_shape.tuple_shapes(oidx), gte)));
}
new_reduce = HloInstruction::CreateTuple(out);
}
}
return ReplaceWithNewInstruction(instr, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionDegenerateDimRemover::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
ReductionDegenerateDimRemoverVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionDegenerateDimRemoverTest : public HloTestBase {
public:
void CheckDegenerateDimRemover(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionDegenerateDimRemover{},
expected);
}
};
TEST_F(ReductionDegenerateDimRemoverTest, ReductionWithDegenerateDimensions) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1,1,1,1] reduce(input, zero), dimensions={1,3,5}, to_apply=add
}
)";
CheckDegenerateDimRemover(hlo, R"(
)");
}
TEST_F(ReductionDegenerateDimRemoverTest,
ReductionWithDegenerateDimensionsVariadic) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
idxs = u32[1,3,1,4,1,5,1] parameter(1)
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[1,1,1,1], u32[1,1,1,1]) reduce(input, idxs, zero, zero_idx), dimensions={1,3,5}, to_apply=argmax
}
)";
CheckDegenerateDimRemover(hlo, R"(
)");
}
TEST_F(ReductionDegenerateDimRemoverTest, DegenerateWithEmptyDimension) {
const char* hlo = R"(
HloModule ReduceWithDegenerateDimensions
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1,3,1,4,1,5,1] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[3,4,5,1] reduce(input, zero), dimensions={0,2,4}, to_apply=add
}
)";
CheckDegenerateDimRemover(hlo,
R"(
)");
}
}
} | 2,052 |
#ifndef XLA_SERVICE_GPU_GPU_WINDOWED_EINSUM_HANDLER_H_
#define XLA_SERVICE_GPU_GPU_WINDOWED_EINSUM_HANDLER_H_
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
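// Pass that processes windowed einsum loops produced by SPMD partitioning:
// the dots of a loop iteration are placed on separate compute streams so
// they can overlap with the loop's communication.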
class GpuWindowedEinsumHandler : public HloModulePass {
public:
absl::string_view name() const override {
return "gpu-windowed-einsum-handler";
}
struct WindowedEinsumAgLoops {
explicit WindowedEinsumAgLoops(HloInstruction* loop) : loop(loop) {}
HloInstruction* loop;
bool consumed = false;
};
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
constexpr static const char* kWindowedEinsumRsLoopName =
"windowed_dot_general_body_rs";
constexpr static const char* kWindowedEinsumAgLoopName =
"windowed_dot_general_body_ag";
private:
std::vector<WindowedEinsumAgLoops> all_ag_loops_;
};
}
#endif
#include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include <array>
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = match;
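// Attempts to move the FP8 dequantization (a convert followed by scaling
// with a broadcast scalar) of the two input operands of a windowed einsum
// while loop into the loop body, so that the loop carries FP8 data plus
// scalar scales instead of the dequantized values.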
absl::Status ShiftDequantizationF8(const HloComputation* comp,
const std::array<HloInstruction*, 2>& gte) {
HloInstruction* while_instr = comp->WhileCallInstruction();
if (!while_instr) {
return absl::OkStatus();
}
HloInstruction* param_tuple = while_instr->mutable_operand(0);
std::array<HloInstruction*, 2> binaries, operands, scales;
for (int k = 0; k < 2; ++k) {
if (!Match(param_tuple->mutable_operand(k),
m::AnyOf<HloInstruction>(
m::Divide(&binaries[k], m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k]))),
m::MultiplyAnyOrder(&binaries[k],
m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k])))))) {
VLOG(5) << "Unable to identify FP8 dequantization pattern.";
return absl::OkStatus();
}
}
std::array<PrimitiveType, 2> operand_types{
operands[0]->shape().element_type(), operands[1]->shape().element_type()};
if (!((operand_types[0] == F8E4M3FN && operand_types[1] == F8E4M3FN) ||
(operand_types[0] == F8E4M3FN && operand_types[1] == F8E5M2) ||
(operand_types[0] == F8E5M2 && operand_types[1] == F8E4M3FN))) {
VLOG(5) << "Unsupported types.";
return absl::OkStatus();
}
for (int k = 0; k < 2; ++k) {
if (binaries[k]->shape().element_type() != BF16 &&
binaries[k]->shape().element_type() != F16 &&
binaries[k]->shape().element_type() != F32) {
VLOG(5) << "Unsupported types.";
return absl::OkStatus();
}
}
if (!ShapeUtil::IsScalar(scales[0]->shape()) ||
!ShapeUtil::IsScalar(scales[1]->shape())) {
VLOG(5) << "Scaling factors must be scalars.";
return absl::OkStatus();
}
HloComputation* while_body = while_instr->while_body();
HloComputation* while_condition = while_instr->while_condition();
HloInstruction* while_root = while_body->root_instruction();
std::array<HloInstruction*, 2> dots, dyn_slices{nullptr, nullptr},
coll_perms{nullptr, nullptr};
if (Match(
while_root,
m::Tuple(m::CollectivePermute(
&coll_perms[1], m::CollectivePermute(
&coll_perms[0], m::Op().Is(gte[0]))),
m::Op().Is(gte[1]),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice().WithOperand(
1, m::Dot(&dots[0], m::Op().Is(gte[0]),
m::Op().Is(gte[1]))),
m::Dot(&dots[1], m::Op(), m::Op().Is(gte[1])), m::Op(),
m::Op(), m::Op()),
m::Op(), m::Op()))) {
VLOG(5) << "Identified all-gather windowed einsum pattern.";
} else if (Match(
while_root,
m::Tuple(m::Op().Is(gte[0]), m::Op().Is(gte[1]),
m::AddAnyOrder(
m::Dot(&dots[0], m::DynamicSlice(&dyn_slices[0]),
m::Op().Is(gte[1])),
m::Op()),
m::CollectivePermute(m::AddAnyOrder(
m::Dot(&dots[1], m::DynamicSlice(&dyn_slices[1]),
m::Op().Is(gte[1])),
m::Op())),
m::Op()))) {
VLOG(5) << "Identified reduce-scatter windowed einsum pattern.";
} else {
VLOG(5) << "Unable to identify valid windowed einsum pattern.";
return absl::OkStatus();
}
for (int k = 0; k < 2; ++k) {
TF_RETURN_IF_ERROR(
param_tuple->ReplaceOperandWithDifferentShape(k, operands[k]));
ShapeUtil::UpdateTupleShape(operands[k]->shape(), k,
param_tuple->mutable_shape());
param_tuple->AppendOperand(scales[k]);
ShapeUtil::AppendShapeToTuple(scales[k]->shape(),
param_tuple->mutable_shape());
}
for (HloComputation* while_comp : {while_body, while_condition}) {
while_comp->ReplaceParameter(
0, HloInstruction::CreateParameter(
0, param_tuple->shape(),
while_comp->parameter_instruction(0)->name()));
}
HloInstruction* body_param = while_body->parameter_instruction(0);
for (int k = 0; k < 2; ++k) {
TF_ASSIGN_OR_RETURN(HloInstruction * operand_f8,
MakeGetTupleElementHlo(body_param, k));
if (while_root->operand(k) == gte[k]) {
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(k, operand_f8));
ShapeUtil::UpdateTupleShape(operand_f8->shape(), k,
while_root->mutable_shape());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scale,
MakeGetTupleElementHlo(
body_param, body_param->shape().tuple_shapes_size() - 2 + k));
while_root->AppendOperand(operand_scale);
ShapeUtil::AppendShapeToTuple(operand_scale->shape(),
while_root->mutable_shape());
HloInstruction* operand_f32 =
MakeConvertToHlo(operand_f8, gte[k]->shape().element_type());
HloInstruction* broadcast_scale =
MakeBroadcastHlo(operand_scale, {}, operand_f32->shape());
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scaled,
MakeBinaryHlo(binaries[k]->opcode(), operand_f32, broadcast_scale));
for (int l = 0; l < 2; ++l) {
if (dots[l]->operand(k) == gte[k]) {
TF_RETURN_IF_ERROR(dots[l]->ReplaceOperandWith(k, operand_scaled));
}
if (dyn_slices[l] && dyn_slices[l]->operand(0) == gte[k]) {
TF_RETURN_IF_ERROR(
dyn_slices[l]->ReplaceOperandWith(0, operand_scaled));
}
}
if (coll_perms[0] && coll_perms[0]->operand(0) == gte[k]) {
std::array<HloInstruction*, 2> coll_perms_f8{nullptr, nullptr};
coll_perms_f8[0] =
while_body->AddInstruction(coll_perms[0]->CloneWithNewOperands(
operand_f8->shape(), {operand_f8}));
coll_perms_f8[1] =
while_body->AddInstruction(coll_perms[1]->CloneWithNewOperands(
coll_perms_f8[0]->shape(), {coll_perms_f8[0]}));
HloInstruction* coll_perm0_f32 =
MakeConvertToHlo(coll_perms_f8[0], gte[k]->shape().element_type());
TF_ASSIGN_OR_RETURN(HloInstruction * x_scaled,
MakeBinaryHlo(binaries[k]->opcode(), coll_perm0_f32,
broadcast_scale));
TF_RETURN_IF_ERROR(dots[1]->ReplaceOperandWith(0, x_scaled));
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(0, coll_perms_f8[1]));
ShapeUtil::UpdateTupleShape(coll_perms_f8[1]->shape(), 0,
while_root->mutable_shape());
}
}
TF_RETURN_IF_ERROR(
while_instr->ReplaceAllUsesWithDifferentShape(while_instr->AddInstruction(
while_instr->CloneWithNewShape(while_root->shape()))));
TF_RETURN_IF_ERROR(while_instr->parent()->RemoveInstruction(while_instr));
if (coll_perms[0]) {
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[1]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[0]));
}
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gte[0]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gte[1]));
VLOG(5) << "FP8 dequantization moved into while loop.";
return absl::OkStatus();
}
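// Returns the number of instructions in comp with the given opcode.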
int64_t NumberOfInstructionsInComp(const HloComputation* comp, HloOpcode op) {
int64_t total_count = 0;
for (const HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == op) {
++total_count;
}
}
return total_count;
}
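// Assigns stream_id as the operation queue of dot and makes dot's first user
// wait on that queue.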
absl::Status UpdateDotAndConsumerConfig(HloInstruction* dot,
int64_t stream_id) {
auto dot_gpu_config = dot->backend_config<gpu::GpuBackendConfig>();
HloInstruction* updater = dot->users()[0];
auto updater_gpu_config = updater->backend_config<gpu::GpuBackendConfig>();
dot_gpu_config->set_operation_queue_id(stream_id);
updater_gpu_config->mutable_wait_on_operation_queues()->Add(stream_id);
TF_RETURN_IF_ERROR(dot->set_backend_config(dot_gpu_config.value()));
TF_RETURN_IF_ERROR(updater->set_backend_config(updater_gpu_config.value()));
return absl::OkStatus();
}
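// Sets the force_earliest_schedule field of instr's GPU backend config.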
absl::Status SetForceDelayForInstruction(HloInstruction* instr,
bool force_delay) {
auto gpu_config = instr->backend_config<gpu::GpuBackendConfig>();
gpu_config->set_force_earliest_schedule(force_delay);
TF_RETURN_IF_ERROR(instr->set_backend_config(gpu_config.value()));
return absl::OkStatus();
}
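// Processes the body of a reduce-scatter windowed einsum loop: shifts the
// FP8 dequantization into the loop where applicable, places each dot on its
// own compute stream, and forces the collective permute to be scheduled
// early.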
absl::StatusOr<bool> HandleRsWindowedEinsumLoop(HloComputation* comp,
int64_t stream_id) {
bool changed = false;
if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) {
return changed;
}
for (auto inst : comp->MakeInstructionPostOrder()) {
HloInstruction* matched_dot;
std::array<HloInstruction*, 2> gte;
if (Match(inst,
m::Dot(&matched_dot,
m::DynamicSlice().WithOperand(
                         0, m::GetTupleElement(&gte[0], m::Parameter(), 0)),
                     m::GetTupleElement(&gte[1], m::Parameter(), 1)))) {
TF_RETURN_IF_ERROR(ShiftDequantizationF8(comp, gte));
TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(matched_dot, stream_id));
++stream_id;
changed = true;
}
HloInstruction* matched_cp;
if (Match(inst, m::CollectivePermute(
&matched_cp, m::GetTupleElement(m::Parameter(), 2)))) {
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_cp, true));
changed = true;
}
}
return changed;
}
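// Processes the body of an all-gather windowed einsum loop: shifts the FP8
// dequantization into the loop where applicable, places each dot on its own
// compute stream, and forces dots and collective permutes to be scheduled
// early.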
absl::StatusOr<bool> HandleAgWindowedEinsumLoop(HloComputation* comp,
int64_t stream_id) {
bool changed = false;
if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) {
return changed;
}
for (auto inst : comp->MakeInstructionPostOrder()) {
HloInstruction* matched_dot;
std::array<HloInstruction*, 2> gte;
if (Match(inst, m::Dot(&matched_dot,
                           m::GetTupleElement(&gte[0], m::Parameter(), 0),
                           m::GetTupleElement(&gte[1], m::Parameter(), 1)))) {
TF_RETURN_IF_ERROR(ShiftDequantizationF8(comp, gte));
TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(matched_dot, stream_id));
++stream_id;
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_dot, true));
changed = true;
}
HloInstruction* matched_cp;
if (Match(inst, m::CollectivePermute(
&matched_cp, m::GetTupleElement(m::Parameter(), 0)))) {
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_cp, true));
changed = true;
}
}
return changed;
}
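// Returns the index at which the cached all-gather activation buffer will be
// appended to the while-loop tuple, i.e. one past its current last element.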
static int64_t GetAgActivationCacheIndex(const HloInstruction* while_loop) {
const HloInstruction* loop_tuple = while_loop->operand(0);
const Shape& tuple_shape = loop_tuple->shape();
CHECK(tuple_shape.IsTuple());
return tuple_shape.tuple_shapes_size();
}
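// Appends a full-sized activation buffer to the loop tuple and inserts
// dynamic-update-slices into the loop body so that the loop also
// materializes the all-gathered activations, letting other consumers of the
// all-gather read them from the loop output instead of re-gathering.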
absl::Status ProcessWindowedEinsumLoopForActivationCaching(
GpuWindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop,
HloInstruction* ag_with_shared_operand) {
HloInstruction* loop = ag_loop.loop;
HloComputation* while_body = loop->while_body();
  HloInstruction* input_gte = nullptr;
for (HloInstruction* gte : while_body->parameter_instruction(0)->users()) {
if (gte->tuple_index() == 0) {
input_gte = gte;
}
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* input_tuple = while_body->parameter_instruction(0);
const Shape& input_shape = input_tuple->shape();
int64_t full_cache_buffer_index = GetAgActivationCacheIndex(loop);
std::vector<Shape> new_input_shapes(input_shape.tuple_shapes().begin(),
input_shape.tuple_shapes().end());
new_input_shapes.push_back(ag_with_shared_operand->shape());
Shape new_input_shape = ShapeUtil::MakeTupleShape(new_input_shapes);
*input_tuple->mutable_shape() = new_input_shape;
HloInstruction* full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
ag_with_shared_operand->shape(), input_tuple,
full_cache_buffer_index));
HloComputation* cond_comp = loop->while_condition();
HloInstruction* cond_input_tuple = cond_comp->parameter_instruction(0);
*cond_input_tuple->mutable_shape() = new_input_shape;
HloInstruction* original_while_input = loop->mutable_operand(0);
HloComputation* parent_comp = loop->parent();
std::vector<HloInstruction*> new_operands(
original_while_input->operands().begin(),
original_while_input->operands().end());
new_operands.push_back(
parent_comp->AddInstruction(HloInstruction::CreateBroadcast(
ag_with_shared_operand->shape(),
parent_comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(new_input_shapes[0].element_type()))),
{})));
HloInstruction* new_while_input =
parent_comp->AddInstruction(HloInstruction::CreateTuple(new_operands));
TF_RETURN_IF_ERROR(
loop->ReplaceOperandWithDifferentShape(0, new_while_input));
TF_RETURN_IF_ERROR(parent_comp->ReplaceInstructionWithDifferentShape(
original_while_input, new_while_input));
*loop->mutable_shape() = new_input_shape;
HloInstruction* new_full_buffer_output = nullptr;
  HloInstruction* dus_boundary_constant = nullptr;
  HloInstruction* first_cp_output = nullptr;
for (HloInstruction* gte_user : input_gte->users()) {
if (gte_user->opcode() == HloOpcode::kCollectivePermute) {
first_cp_output = gte_user;
break;
}
}
for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) {
HloInstruction* slice_indices;
if (Match(inst,
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter()), m::Op(),
m::Constant(&dus_boundary_constant),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for first slice: "
<< slice_indices->ToString();
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
if (Match(inst,
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(), m::Op(), m::Constant(),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for second slice: "
<< slice_indices->ToString();
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
HloInstruction* slice_index;
HloInstruction* ds_index_constant;
HloInstruction* remainder;
HloInstruction* ds_param;
if (Match(inst, m::Dot(m::Op(), m::DynamicSlice(&ds_param))) &&
Match(ds_param->operand(0), m::GetTupleElement(m::Parameter(), 1))) {
for (int64_t ds_op_i = 1; ds_op_i < ds_param->operands().size();
ds_op_i++) {
if (!Match(
ds_param->mutable_operand(ds_op_i),
m::Reshape(&slice_index, m::DynamicSlice(m::Constant(),
m::Op(&remainder)))) &&
!Match(ds_param->mutable_operand(ds_op_i),
m::Constant(&ds_index_constant))) {
return absl::OkStatus();
}
}
if (Match(remainder,
m::Remainder(m::Add(m::GetTupleElement(), m::Op()), m::Op()))) {
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{ds_index_constant, ds_index_constant, slice_index}));
}
if (Match(remainder,
m::Remainder(
m::Add(m::Add(m::GetTupleElement(), m::Op()), m::Op()),
m::Op()))) {
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{ds_index_constant, ds_index_constant, slice_index}));
}
}
}
std::vector<HloInstruction*> original_operands(root->operands().begin(),
root->operands().end());
original_operands.push_back(new_full_buffer_output);
HloInstruction* new_output_tuple = while_body->AddInstruction(
HloInstruction::CreateTuple(original_operands));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstructionWithDifferentShape(root, new_output_tuple));
return absl::OkStatus();
}
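// Returns whether the collective instruction has explicit replica groups.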
bool HasReplicaGroups(const HloInstruction* inst) {
  return !inst->replica_groups().empty();
}
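// Returns whether inst is a layout- or shape-changing op (transpose, reshape
// or copy) with a single user, which may sit between an all-to-all and a
// dot.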
bool ShouldAddToChain(const HloInstruction* inst) {
switch (inst->opcode()) {
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
case HloOpcode::kCopy:
return inst->user_count() == 1;
default:
return false;
}
}
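// Result of matching a GEMM whose output feeds an all-to-all.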
struct MatchedGemmA2aResult {
HloInstruction* producer_gemm;
HloInstruction* lhs;
HloInstruction* rhs;
HloInstruction* a2a_replacement = nullptr;
bool matched = false;
};
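// Visitor that rewrites the consumers of windowed einsum loops and
// decomposes all-to-all+GEMM sequences so that communication can overlap
// with computation.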
class WindowedEinsumVisitor : public DfsHloRewriteVisitor {
public:
explicit WindowedEinsumVisitor(
std::vector<GpuWindowedEinsumHandler::WindowedEinsumAgLoops>&
all_ag_loops)
: all_ag_loops_(all_ag_loops) {}
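  // Matches a dot whose left-hand side is an all-to-all, possibly separated
  // from the dot by a chain of transpose, reshape and copy ops. When the
  // chain preserves the split dimension, the all-to-all is moved across the
  // chain so that it directly feeds the dot, with its split dimension
  // remapped accordingly.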
absl::StatusOr<bool> MatchA2aGemmWithIntermediateReshapes(
HloInstruction* dot, HloInstruction** lhs, HloInstruction** rhs) {
if (Match(dot, m::Dot(m::AllToAll(lhs).WithOneUse().WithPredicate(
HasReplicaGroups),
m::Op(rhs))) &&
!DynCast<HloAllToAllInstruction>((*lhs))->constrain_layout() &&
!(*lhs)->shape().IsTuple()) {
return true;
}
std::vector<HloInstruction*> allowed_intermediate_ops(
{dot->mutable_operand(0)});
HloAllToAllInstruction* matched_a2a = nullptr;
while (true) {
HloInstruction* curr = allowed_intermediate_ops.back();
if (ShouldAddToChain(curr)) {
allowed_intermediate_ops.insert(allowed_intermediate_ops.end(),
std::begin(curr->operands()),
std::end(curr->operands()));
} else if (curr->opcode() == HloOpcode::kAllToAll &&
curr->user_count() == 1) {
matched_a2a = DynCast<HloAllToAllInstruction>(curr);
allowed_intermediate_ops.pop_back();
break;
} else {
return false;
}
}
CHECK(matched_a2a != nullptr);
if (matched_a2a->constrain_layout() || matched_a2a->shape().IsTuple() ||
!HasReplicaGroups(matched_a2a) || !matched_a2a->split_dimension()) {
return false;
}
int64_t split_dimension = *matched_a2a->split_dimension();
for (int64_t i = allowed_intermediate_ops.size() - 1; i >= 0; i--) {
HloInstruction* current_op = allowed_intermediate_ops[i];
if (current_op->opcode() == HloOpcode::kReshape) {
std::vector<std::pair<int64_t, int64_t>> unmodified_dims =
ShapeUtil::DimensionsUnmodifiedByReshape(
current_op->operand(0)->shape(), current_op->shape());
auto it = absl::c_find_if(
unmodified_dims,
[&split_dimension](std::pair<int64_t, int64_t>& dim_pair) {
return dim_pair.first == split_dimension;
});
if (it == unmodified_dims.end()) {
VLOG(5) << "Split dimension of: " << matched_a2a->ToShortString()
<< " has been modified by reshapes. Skip process it for "
"decomposition.";
return false;
}
split_dimension = it->second;
} else if (current_op->opcode() == HloOpcode::kTranspose) {
const auto& transpose_dims = current_op->dimensions();
for (int64_t j = 0; j < transpose_dims.size(); j++) {
          if (transpose_dims[j] == split_dimension) {
split_dimension = j;
break;
}
}
}
}
TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith(
0, matched_a2a->mutable_operand(0)));
HloInstruction* new_a2a =
matched_a2a->parent()->AddInstruction(HloInstruction::CreateAllToAll(
allowed_intermediate_ops.front()->shape(),
{allowed_intermediate_ops.front()}, matched_a2a->replica_groups(),
false, hlo_query::NextChannelId(*matched_a2a->GetModule()),
split_dimension));
TF_RETURN_IF_ERROR(dot->ReplaceOperandWith(0, new_a2a));
TF_RETURN_IF_ERROR(
matched_a2a->parent()->RemoveInstructionAndUnusedOperands(matched_a2a));
MarkAsChanged();
*lhs = new_a2a;
*rhs = dot->mutable_operand(1);
return true;
}
absl::Status HandleDot(HloInstruction* dot) override {
CHECK_EQ(dot->opcode(), HloOpcode::kDot);
HloComputation* comp = dot->parent();
for (GpuWindowedEinsumHandler::WindowedEinsumAgLoops ag_loop : | #include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
using GpuWindowedEinsumHandlerTest = HloTestBase;
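// Returns the instruction in comp with the given name, or nullptr if absent.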
HloInstruction* FindInstructionByName(HloComputation* comp,
                                      absl::string_view name) {
for (auto inst : comp->instructions()) {
if (inst->name() == name) {
return inst;
}
}
return nullptr;
}
TEST_F(GpuWindowedEinsumHandlerTest, AgLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,512,24576]{2,1,0}, bf16[24576,24576]{1,0})->bf16[2048,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_ag.1 {
param = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element = bf16[512,24576]{1,0} get-tuple-element(param), index=0
collective-permute = bf16[512,24576]{1,0} collective-permute(get-tuple-element), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.1 = bf16[24576,24576]{1,0} get-tuple-element(param), index=1
get-tuple-element.2 = bf16[2048,24576]{1,0} get-tuple-element(param), index=2
dot.2 = bf16[512,24576]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.1 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.4 = u32[] get-tuple-element(param), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.4, partition-id)
constant = u32[] constant(4)
remainder = u32[] remainder(add, constant)
dynamic-slice = s32[1]{0} dynamic-slice(constant.1, remainder), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice)
constant.2 = s32[] constant(0)
dynamic-update-slice = bf16[2048,24576]{1,0} dynamic-update-slice(get-tuple-element.2, dot.2, reshape.4, constant.2), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
dot.3 = bf16[512,24576]{1,0} dot(collective-permute, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.3 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.4, constant.3)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.1, remainder.1), dynamic_slice_sizes={1}
reshape.5 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2048,24576]{1,0} dynamic-update-slice(dynamic-update-slice, dot.3, reshape.5, constant.2)
get-tuple-element.3 = bf16[2048,24576]{1,0} get-tuple-element(param), index=3
add.3 = u32[] add(add.1, constant.3)
ROOT tuple = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(collective-permute, get-tuple-element.1, dynamic-update-slice.1, get-tuple-element.3, add.3)
}
windowed_dot_general_cond_ag {
param.1 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
constant.8 = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element.5, constant.8), direction=LT
}
ENTRY test_main {
param.4 = bf16[1,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
reshape.8 = bf16[512,24576]{1,0} reshape(param.4)
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.18 = bf16[] constant(0)
broadcast = bf16[2048,24576]{1,0} broadcast(constant.18), dimensions={}
constant.20 = u32[] constant(0)
tuple.2 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(reshape.8, param.5, broadcast, broadcast, constant.20)
while = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag.1
ROOT get-tuple-element.13 = bf16[2048,24576]{1,0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* ag_loop_body = ag_loop->while_body();
HloInstruction* inst = FindInstructionByName(ag_loop_body, "dot.2");
EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(), 0);
EXPECT_TRUE(
inst->backend_config<GpuBackendConfig>()->force_earliest_schedule());
HloInstruction* cp1 =
FindInstructionByName(ag_loop_body, "collective-permute");
EXPECT_TRUE(
cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule());
}
TEST_F(GpuWindowedEinsumHandlerTest, RsLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[2048,24576]{1,0})->bf16[512,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_rs_clone.1 {
param.2 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.6 = bf16[2048,24576]{1,0} get-tuple-element(param.2), index=0
get-tuple-element.7 = bf16[24576,24576]{1,0} get-tuple-element(param.2), index=1
get-tuple-element.9 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=2
collective-permute.1 = bf16[512,24576]{1,0} collective-permute(get-tuple-element.9), channel_id=4, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.10 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.11 = u32[] get-tuple-element(param.2), index=4
constant.12 = u32[] constant(2)
add.8 = u32[] add(get-tuple-element.11, constant.12)
constant.13 = u32[] constant(1)
add.9 = u32[] add(add.8, constant.13)
partition-id.3 = u32[] partition-id()
add.10 = u32[] add(add.9, partition-id.3)
constant.9 = u32[] constant(4)
remainder.3 = u32[] remainder(add.10, constant.9)
dynamic-slice.4 = s32[1]{0} dynamic-slice(constant.10, remainder.3), dynamic_slice_sizes={1}
reshape.7 = s32[] reshape(dynamic-slice.4)
constant.11 = s32[] constant(0)
dynamic-slice.5 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.7, constant.11), dynamic_slice_sizes={512,24576}
dot.7 = bf16[512,24576]{1,0} dot(dynamic-slice.5, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.11 = bf16[512,24576]{1,0} add(collective-permute.1, dot.7), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.10 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=3
add.6 = u32[] add(get-tuple-element.11, partition-id.3)
remainder.2 = u32[] remainder(add.6, constant.9)
dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.10, remainder.2), dynamic_slice_sizes={1}
reshape.6 = s32[] reshape(dynamic-slice.2)
dynamic-slice.3 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.6, constant.11), dynamic_slice_sizes={512,24576}
dot.5 = bf16[512,24576]{1,0} dot(dynamic-slice.3, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.7 = bf16[512,24576]{1,0} add(get-tuple-element.10, dot.5), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
collective-permute.2 = bf16[512,24576]{1,0} collective-permute(add.7), channel_id=5, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}
ROOT tuple.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(get-tuple-element.6, get-tuple-element.7, add.11, collective-permute.2, add.8)
}
windowed_dot_general_cond_rs {
param.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.12 = u32[] get-tuple-element(param.3), index=4
constant.17 = u32[] constant(4)
ROOT compare.1 = pred[] compare(get-tuple-element.12, constant.17), direction=LT
}
ENTRY main.9_spmd {
param.6 = bf16[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]}
param.7 = bf16[512,24576]{1,0} parameter(1)
param.8 = bf16[2048,24576]{1,0} parameter(2)
constant.20 = u32[] constant(0)
tuple.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(param.8, param.6, param.7, param.7, constant.20)
while.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs_clone.1
ROOT get-tuple-element.14 = bf16[512,24576]{1,0} get-tuple-element(while.1), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* rs_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* rs_loop_body = rs_loop->while_body();
HloInstruction* inst = FindInstructionByName(rs_loop_body, "dot.7");
  EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(), 0);
HloInstruction* cp1 =
FindInstructionByName(rs_loop_body, "collective-permute.1");
EXPECT_TRUE(
cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule());
}
TEST_F(GpuWindowedEinsumHandlerTest, AgLoopsMultipleConsumersAreChained) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[24576,24576]{1,0})->bf16[2,2048,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_ag {
param.1 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element.1 = bf16[2,512,24576]{2,1,0} get-tuple-element(param.1), index=0
collective-permute = bf16[2,512,24576]{2,1,0} collective-permute(get-tuple-element.1), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
collective-permute.1 = bf16[2,512,24576]{2,1,0} collective-permute(collective-permute), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
get-tuple-element.2 = bf16[24576,24576]{1,0} get-tuple-element(param.1), index=1
get-tuple-element.3 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=2
dot = bf16[2,512,24576]{2,1,0} dot(get-tuple-element.1, get-tuple-element.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.2 = s32[] constant(0)
constant.3 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.5, partition-id)
constant.1 = u32[] constant(4)
remainder = u32[] remainder(add, constant.1)
dynamic-slice = s32[1]{0} dynamic-slice(constant.3, remainder), dynamic_slice_sizes={1}
reshape = s32[] reshape(dynamic-slice)
dynamic-update-slice = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(get-tuple-element.3, dot, constant.2, reshape, constant.2)
dot.1 = bf16[2,512,24576]{2,1,0} dot(collective-permute, get-tuple-element.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.5 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.5, constant.5)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant.1)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.3, remainder.1), dynamic_slice_sizes={1}
reshape.1 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(dynamic-update-slice, dot.1, constant.2, reshape.1, constant.2)
get-tuple-element.4 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=3
add.3 = u32[] add(add.1, constant.5)
ROOT tuple = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(collective-permute.1, get-tuple-element.2, dynamic-update-slice.1, get-tuple-element.4, add.3)
}
windowed_dot_general_cond_ag {
param = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(param), index=4
constant = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT
}
ENTRY main.12_spmd {
param.4 = bf16[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.22 = bf16[] constant(0)
broadcast = bf16[2,2048,24576]{2,1,0} broadcast(constant.22), dimensions={}
constant.24 = u32[] constant(0)
tuple.2 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24)
while = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
get-tuple-element.13 = bf16[2,2048,24576]{2,1,0} get-tuple-element(while), index=2
copy.1 = bf16[2,2048,24576]{2,1,0} copy(get-tuple-element.13)
all-gather = bf16[2,2048,24576]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
param.6 = bf16[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
ROOT dot.7 = bf16[2,2048,24576]{2,1,0} dot(all-gather, param.6), lhs_contracting_dims={2}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
FindInstructionByName(module->entry_computation(), "while");
HloInstruction* inst =
FindInstructionByName(module->entry_computation(), "dot.7");
EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(inst->operand(0)->tuple_index(), 5);
EXPECT_EQ(inst->operand(0)->operand(0), ag_loop);
HloInstruction* ag_while_root = ag_loop->while_body()->root_instruction();
EXPECT_THAT(ag_while_root,
GmockMatch(m::Tuple(
m::Op(), m::Op(), m::Op(), m::Op(), m::Op(),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter())
.WithPredicate([](const HloInstruction* instr) {
return instr->tuple_index() == 5;
}),
m::Op(), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op(), m::Op()))));
}
TEST_F(GpuWindowedEinsumHandlerTest, A2aGemmHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,8192]{3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=8
ENTRY main.9_spmd {
param0 = bf16[1,8192,32768]{2,1,0} parameter(0)
param1 = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(param1), channel_id=4, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={1}
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(all-to-all, param0), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["5"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false}
CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
TEST_F(GpuWindowedEinsumHandlerTest, GemmA2aHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,32768]{3,2,1,0})->bf16[1,4,2048,8192]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
dot.12 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.10, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}
ROOT all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(dot.12), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [24576:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [16384:24576]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [8192:16384]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]])
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]])
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]])
CHECK: ROOT {{.*}} = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
GpuWindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
TEST_F(GpuWindowedEinsumHandlerTest, A2aTransposeLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
all-to-all = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} all-to-all(param.10), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={3}
transpose.15 = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(all-to-all), dimensions={0,3,1,2,4,5}
reshape.2170 = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(transpose.15)
reshape.2173 = bf16[4,8192,1,2048]{3,2,1,0} reshape(reshape.2170)
transpose.16 = bf16[1,4,2048,8192]{2,0,3,1} transpose(reshape.2173), dimensions={2,0,3,1}
copy.53 = bf16[1,4,2048,8192]{3,2,1,0} copy(transpose.16)
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(copy.53, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
CHECK-DAG: %[[TRANSPOSE0:.*]] = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} %[[P1:.*]]), dimensions={0,3,1,2,4,5}
CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} %[[TRANSPOSE0:.*]])
CHECK-DAG: %[[RESHAPE1:.*]] = bf16[4,8192,1,2048]{3,2,1,0} reshape(bf16[1,4,8192,1,2048]{4,3,2,1,0} %[[RESHAPE0:.*]])
CHECK-DAG: %[[TRANSPOSE1:.*]] = bf16[1,4,2048,8192]{2,0,3,1} transpose(bf16[4,8192,1,2048]{3,2,1,0} %[[RESHAPE1:.*]]), dimensions={2,0,3,1}
CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{2,0,3,1} %[[TRANSPOSE1:.*]])
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), | 2,053 |
#ifndef XLA_SERVICE_GPU_CUDNN_NORM_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_NORM_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
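// Rewrites eligible layer norm patterns into custom calls to cuDNN norm
// kernels.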
class CudnnNormRewriter : public HloModulePass {
public:
explicit CudnnNormRewriter(se::CudaComputeCapability cuda_compute_capability);
absl::string_view name() const override { return "norm-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::CudaComputeCapability cuda_compute_capability_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_norm_rewriter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/dnn.pb.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = match;
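// Skips over convert, bitcast and reshape ops, following operand 0, and
// returns the first producer that is none of these.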
const HloInstruction* SkipUnaryOps(const HloInstruction* instr) {
while (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
instr = instr->operand(0);
}
return instr;
}
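// Recursively traverses the users of instr through convert, bitcast and
// reshape ops and collects the first users that are none of these into
// instrs.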
void SkipUnaryOpsTopDownRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& instrs) {
if (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
for (HloInstruction* user : instr->users()) {
SkipUnaryOpsTopDownRecursive(user, instrs);
}
} else {
instrs.emplace_back(instr);
}
}
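// Metadata captured for a matched norm: the transposes applied to the input
// (x_transpose) and output (y_transpose) of the custom call, and the norm
// and non-norm dimensions after removing degenerate dimensions.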
struct NormMetadata {
HloInstruction *x_transpose, *y_transpose;
std::vector<int64_t> norm_dims_adjusted, non_norm_dims_adjusted;
};
using NormMetadataMap = absl::flat_hash_map<HloInstruction*, NormMetadata>;
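// Captures an HloInstruction the first time it is seen and verifies that all
// subsequent captures refer to the same instruction; on a mismatch, the
// captured instruction is invalidated (set to null).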
class UniqueHloInstruction {
public:
UniqueHloInstruction()
: is_set_(false), instr_(nullptr), capture_or_verify_() {}
HloInstruction* Instr() const { return instr_; }
void SetInstr(HloInstruction* instr) {
is_set_ = true;
instr_ = instr;
}
bool CaptureOrVerify(HloInstruction* instr) {
if (is_set_ && instr != instr_) {
instr_ = nullptr;
}
if (!is_set_) {
is_set_ = true;
instr_ = instr;
}
    return instr_ != nullptr;
}
std::function<bool(const HloInstruction*)> GetCaptureOrVerifyFn() {
if (!capture_or_verify_) {
capture_or_verify_ = [this](const HloInstruction* instr) -> bool {
return CaptureOrVerify(const_cast<HloInstruction*>(instr));
};
}
return capture_or_verify_;
}
private:
bool is_set_;
HloInstruction* instr_;
std::function<bool(const HloInstruction*)> capture_or_verify_;
};
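// Returns an architecture-dependent constant entering the upper bound on the
// scratch space used by the norm kernels. Requires Ampere or Hopper.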
absl::StatusOr<int64_t> CConstant(
se::CudaComputeCapability cuda_compute_capability) {
if (cuda_compute_capability.major == se::CudaComputeCapability::AMPERE) {
return 32 * 128;
} else if (cuda_compute_capability.major ==
se::CudaComputeCapability::HOPPER) {
return 32 * 144;
}
return xla::Internal("Norm kernels require Ampere or Hopper architecture.");
}
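// Returns whether the element type of instr is supported by the norm kernels
// (BF16, F16 or F32).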
bool CompatibleElementType(const HloInstruction* instr) {
PrimitiveType element_type = instr->shape().element_type();
return element_type == BF16 || element_type == F16 || element_type == F32;
}
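// Maps the dimension indices in dimensions onto the indices they assume
// after all degenerate (size-1) dimensions of shape have been removed;
// degenerate dimensions themselves are dropped from the result.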
std::vector<int64_t> AdjustedDimensions(const Shape& shape,
absl::Span<const int64_t> dimensions) {
absl::flat_hash_map<int64_t, int64_t> dimension_map;
for (int64_t dimension = 0, non_degen_dimension = 0; dimension < shape.rank();
++dimension) {
if (shape.dimensions(dimension) > 1) {
dimension_map.insert({dimension, non_degen_dimension});
non_degen_dimension++;
}
}
std::vector<int64_t> adjusted_dimensions;
for (int64_t dimension : dimensions) {
auto non_degenerate_dimension = dimension_map.find(dimension);
if (non_degenerate_dimension != dimension_map.end()) {
adjusted_dimensions.emplace_back(non_degenerate_dimension->second);
}
}
return adjusted_dimensions;
}
std::vector<int64_t> AdjustedDimensions(const HloInstruction* instr) {
Shape shape;
if (instr->opcode() == HloOpcode::kBroadcast) {
shape = instr->shape();
} else if (instr->opcode() == HloOpcode::kReduce) {
shape = instr->operand(0)->shape();
} else {
return {};
}
return AdjustedDimensions(shape, instr->dimensions());
}
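// Returns whether instr is a reduction, optionally over reduce_dims
// (compared after removing degenerate dimensions), that adds its inputs with
// an initial value of zero.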
bool AppliesAddReduce(const HloInstruction* instr,
absl::Span<const int64_t> reduce_dims = {}) {
if (instr->opcode() != HloOpcode::kReduce) {
return false;
}
if (!reduce_dims.empty() && AdjustedDimensions(instr) != reduce_dims) {
return false;
}
HloComputation* reduce_comp = instr->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return instr->operand_count() == 2 &&
instr->operand(1)->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalar(instr->operand(1)->shape()) &&
instr->operand(1)->literal().GetAsDouble({}) == 0. &&
reduce_comp_root->opcode() == HloOpcode::kAdd &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
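// Returns whether instr computes the expectation (mean) of its input, i.e.
// multiplies the result of a reduction by a constant approximately equal to
// the reciprocal of the number of reduced elements.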
bool CalculatesExpectation(const HloInstruction* instr) {
instr = SkipUnaryOps(instr);
if (instr->opcode() != HloOpcode::kMultiply) {
return false;
}
bool bcast_operand = instr->operand(0)->opcode() != HloOpcode::kBroadcast;
const HloInstruction *broadcast = instr->operand(bcast_operand),
*reduce = SkipUnaryOps(instr->operand(!bcast_operand));
if (reduce->opcode() != HloOpcode::kReduce ||
broadcast->opcode() != HloOpcode::kBroadcast ||
broadcast->operand(0)->opcode() != HloOpcode::kConstant) {
return false;
}
float actual_r_nelems =
broadcast->operand(0)->literal().GetAsDouble({}).value();
int64_t nelems = 1;
for (int64_t norm_dim : reduce->dimensions()) {
nelems *= reduce->operand(0)->shape().dimensions()[norm_dim];
}
float r_nelems = 1. / static_cast<float>(nelems);
float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon();
  return std::abs(actual_r_nelems - r_nelems) <
((actual_r_nelems + r_nelems) * numerical_epsilon);
}
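// Traverses the users of instr through convert, bitcast and reshape ops, as
// well as through the transpose recorded in the norm metadata of custom_call,
// falling back to the operand when no user matches, and returns true if
// target is reachable.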
bool FindTargetRecursive(
const HloInstruction* instr, const HloInstruction* target,
absl::flat_hash_set<const HloInstruction*>& visited_instrs,
const HloInstruction* transpose) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
if (instr == target) {
return true;
}
for (HloInstruction* user : instr->users()) {
if ((supported_ops.contains(user->opcode()) || user == transpose) &&
!visited_instrs.contains(user)) {
return FindTargetRecursive(user, target, visited_instrs, transpose);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindTargetRecursive(instr->operand(0), target, visited_instrs,
transpose);
}
return false;
}
bool FindTarget(const HloInstruction* custom_call, const HloInstruction* instr,
const HloInstruction* target,
const NormMetadataMap& norm_metadata) {
absl::flat_hash_set<const HloInstruction*> visited_instrs;
auto custom_call_metadata = norm_metadata.find(custom_call);
if (custom_call_metadata == norm_metadata.end()) {
return false;
}
return FindTargetRecursive(instr, target, visited_instrs,
custom_call_metadata->second.x_transpose);
}
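// Maps the dimension numbers in dimensions of original_shape onto the
// dimension numbers of reshaped_shape by grouping dimensions with equal
// element-count products. Returns an empty vector if no unambiguous mapping
// exists, e.g. when a many-to-many grouping would be required or when
// dimensions covers only part of a matched group.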
std::vector<int64_t> MapDimensions(const Shape& original_shape,
const Shape& reshaped_shape,
const absl::Span<const int64_t> dimensions) {
auto dimension_product =
[](const Shape& shape,
absl::Span<const int64_t> product_dimensions) -> int64_t {
int64_t product = 1;
for (int64_t product_dimension : product_dimensions) {
product *= shape.dimensions(product_dimension);
}
return product;
};
absl::flat_hash_map<int64_t, std::vector<int64_t>> dimensions_map;
std::vector<int64_t> original_dimensions, reshaped_dimensions;
for (int64_t original_dimension = 0, reshaped_dimension = 0;
original_dimension < original_shape.rank(); ++original_dimension) {
original_dimensions.emplace_back(original_dimension);
while ((reshaped_dimensions.empty() ||
dimension_product(reshaped_shape, reshaped_dimensions) <
dimension_product(original_shape, original_dimensions)) &&
reshaped_dimension < reshaped_shape.rank()) {
reshaped_dimensions.emplace_back(reshaped_dimension++);
}
if (original_dimensions.size() > 1 && reshaped_dimensions.size() > 1) {
return {};
}
if (dimension_product(original_shape, original_dimensions) ==
dimension_product(reshaped_shape, reshaped_dimensions)) {
std::vector<int64_t> original_dimensions_in_dimensions;
std::set_intersection(
original_dimensions.begin(), original_dimensions.end(),
dimensions.begin(), dimensions.end(),
std::back_inserter(original_dimensions_in_dimensions));
if (!original_dimensions_in_dimensions.empty() &&
original_dimensions_in_dimensions.size() !=
original_dimensions.size()) {
return {};
}
for (int64_t dimension : original_dimensions) {
dimensions_map.insert({dimension, reshaped_dimensions});
}
original_dimensions.clear();
reshaped_dimensions.clear();
}
}
std::vector<int64_t> mapped_dimensions;
for (int64_t dimension : dimensions) {
auto mapped_dimension = dimensions_map.find(dimension);
if (mapped_dimension == dimensions_map.end()) {
return {};
}
mapped_dimensions.insert(mapped_dimensions.end(),
mapped_dimension->second.begin(),
mapped_dimension->second.end());
}
mapped_dimensions.erase(
std::unique(mapped_dimensions.begin(), mapped_dimensions.end()),
mapped_dimensions.end());
return mapped_dimensions;
}
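// Starting from instr, searches through convert, bitcast and reshape ops for
// an add-reduce over reduce_dims, expressed in the coordinates of
// orig_instr_shape, and returns it, or nullptr if none is found.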
HloInstruction* FindAddReduceRecursive(
HloInstruction* instr, const Shape& orig_instr_shape,
const absl::Span<const int64_t> reduce_dims,
absl::flat_hash_set<HloInstruction*>& visited_instrs) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
for (HloInstruction* user : instr->users()) {
if (user->opcode() == HloOpcode::kReduce) {
std::vector<int64_t> mapped_reduce_dims =
MapDimensions(orig_instr_shape, instr->shape(), reduce_dims);
if (!mapped_reduce_dims.empty() &&
AppliesAddReduce(user, mapped_reduce_dims)) {
return user;
}
}
if (supported_ops.contains(user->opcode()) &&
!visited_instrs.contains(user)) {
return FindAddReduceRecursive(user, orig_instr_shape, reduce_dims,
visited_instrs);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindAddReduceRecursive(instr->mutable_operand(0), orig_instr_shape,
reduce_dims, visited_instrs);
}
return nullptr;
}
HloInstruction* FindAddReduce(HloInstruction* instr,
const absl::Span<const int64_t> reduce_dims) {
absl::flat_hash_set<HloInstruction*> visited_instrs;
return FindAddReduceRecursive(instr, instr->shape(), reduce_dims,
visited_instrs);
}
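// Matchers for converts, bitcasts and reshapes the rewriter can absorb:
// converts must stay within the supported element types, and bitcasts or
// reshapes may only add or remove degenerate dimensions.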
template <typename Pattern>
auto SupportedConvert(Pattern pattern) {
auto supported_convert = [](const HloInstruction* instr) -> bool {
return CompatibleElementType(instr) &&
CompatibleElementType(instr->operand(0));
};
return m::Convert(pattern).WithPredicate(supported_convert);
}
template <typename Pattern>
auto SupportedBitcastOrReshape(Pattern pattern) {
auto supported_bitcast_or_reshape = [](const HloInstruction* instr) -> bool {
return ShapeUtil::Equal(
ShapeUtil::DropDegenerateDimensions(instr->shape()),
ShapeUtil::DropDegenerateDimensions(instr->operand(0)->shape()));
};
return m::AnyOf<HloInstruction>(
m::Bitcast(pattern).WithPredicate(supported_bitcast_or_reshape),
m::Reshape(pattern).WithPredicate(supported_bitcast_or_reshape));
}
template <typename Pattern>
auto OptionalSupportedTransform(Pattern pattern) {
auto shared_subpattern = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(
SupportedConvert(SupportedBitcastOrReshape(shared_subpattern)),
SupportedBitcastOrReshape(SupportedConvert(shared_subpattern)),
SupportedConvert(shared_subpattern),
SupportedBitcastOrReshape(shared_subpattern), shared_subpattern);
}
template <typename Pattern>
auto BitcastOrReshape(Pattern pattern) {
return OptionalSupportedTransform(
m::AnyOf<HloInstruction>(m::Bitcast(pattern), m::Reshape(pattern)));
}
template <typename Pattern>
auto Transpose(Pattern pattern) {
return OptionalSupportedTransform(m::Transpose(pattern));
}
template <typename Pattern>
auto Rsqrt(HloInstruction** rsqrt, Pattern pattern) {
return OptionalSupportedTransform(m::Rsqrt(rsqrt, pattern));
}
template <typename Pattern0, typename Pattern1>
auto AddAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::AddAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(HloInstruction** subtract, Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(subtract, pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::MultiplyAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(HloInstruction** multiply, Pattern0 pattern0,
Pattern1 pattern1) {
return OptionalSupportedTransform(
m::MultiplyAnyOrder(multiply, pattern0, pattern1));
}
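// Matches the square of pattern, i.e. a multiply whose two operands are the
// same instruction.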
template <typename Pattern>
auto Square(Pattern pattern) {
return MultiplyAnyOrder(pattern, pattern)
.WithPredicate([](const HloInstruction* instr) {
return instr->unique_operands().size() == 1;
});
}
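// Matches the cube of pattern as square(x) * x, requiring that exactly one
// operand of the outer multiply is itself a multiply and that it squares the
// other operand. Note that square_operand holds an operand index, not a
// boolean.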
template <typename Pattern>
auto Cube(Pattern pattern) {
auto unique_cube = [](const HloInstruction* instr) -> bool {
bool square_operand = instr->operand(0)->opcode() != HloOpcode::kMultiply;
return instr->operand(!square_operand)->opcode() != HloOpcode::kMultiply &&
instr->operand(square_operand)->operand(0) ==
instr->operand(!square_operand);
};
return MultiplyAnyOrder(Square(pattern), pattern).WithPredicate(unique_cube);
}
template <typename Pattern>
auto AddReduce(Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
template <typename Pattern>
auto AddReduce(HloInstruction** reduction, Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(reduction, pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
template <typename Pattern>
auto NegateAddReduce(HloInstruction** reduction, Pattern pattern) {
return m::AnyOf<HloInstruction>(AddReduce(reduction, m::Negate(pattern)),
m::Negate(AddReduce(reduction, pattern)));
}
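// Matchers for the expectation (mean) of pattern: an add-reduce multiplied by
// a broadcast scalar reciprocal of the element count, optionally hidden
// behind a broadcast. The overloads additionally capture or verify the
// expectation and the reduction instruction.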
template <typename Pattern>
auto Expectation(Pattern pattern) {
auto shared_subpattern =
MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
});
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, HloInstruction** reduce,
Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()),
AddReduce(reduce, pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
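// Matches the variance of x in either of its algebraically equivalent forms,
// E[x^2] - E[x]^2 or E[(x - E[x])^2], capturing the variance, the expectation
// and the input x.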
auto Variance(UniqueHloInstruction* variance, UniqueHloInstruction* expectation,
UniqueHloInstruction* x) {
return m::AnyOf<HloInstruction>(
Subtract(
Expectation(Square(OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())))),
Square(Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn())))))
.WithPredicate(variance->GetCaptureOrVerifyFn()),
Expectation(
Square(Subtract(
OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())),
Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn()))))))
.WithPredicate(variance->GetCaptureOrVerifyFn()));
}
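// Matches the norm factor 1/sqrt(variance + epsilon), optionally broadcast.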
auto NormFactor(HloInstruction** norm_factor, UniqueHloInstruction* x,
UniqueHloInstruction* variance,
UniqueHloInstruction* expectation,
UniqueHloInstruction* epsilon) {
auto shared_subpattern = m::SharedSubpattern(Rsqrt(
norm_factor, AddAnyOrder(Variance(variance, expectation, x),
m::Broadcast(m::ConstantScalar().WithPredicate(
epsilon->GetCaptureOrVerifyFn())))));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
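// Commutative matchers for three-operand products and sums, and for
// (p0 - p1) * p2 * p3 + p4 in both factored and distributed form.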
template <typename P0, typename P1, typename P2>
auto MultiplyMultiplyAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, MultiplyAnyOrder(p1, p2)),
MultiplyAnyOrder(p1, MultiplyAnyOrder(p0, p2)),
MultiplyAnyOrder(p2, MultiplyAnyOrder(p0, p1)));
}
template <typename P0, typename P1, typename P2>
auto AddAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(p1, AddAnyOrder(p0, p2)),
AddAnyOrder(p2, AddAnyOrder(p0, p1)));
}
template <typename P0, typename P1, typename P2>
auto MultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(MultiplyAnyOrder(p0, p1), MultiplyAnyOrder(p0, p2)));
}
template <typename P0, typename P1, typename P2>
auto SubtractAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(Subtract(p0, p1), p2),
AddAnyOrder(Subtract(p2, p1), p0),
Subtract(AddAnyOrder(p0, p2), p1));
}
template <typename P0, typename P1, typename P2, typename P3, typename P4>
auto SubtractMultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2, P3 p3, P4 p4) {
return m::AnyOf<HloInstruction>(
SubtractAddAnyOrder(MultiplyMultiplyAnyOrder(p0, p2, p3),
MultiplyMultiplyAnyOrder(p1, p2, p3), p4),
AddAnyOrder(MultiplyMultiplyAnyOrder(Subtract(p0, p1), p2, p3), p4));
}
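// Matchers for the expectation and norm factor produced by a previously
// rewritten cuDNN norm custom call: tuple elements 1 and 2, optionally
// followed by a bitcast or reshape.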
auto FusedExpectation(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedExpectation(UniqueHloInstruction* fused_expectation,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1)
.WithPredicate(fused_expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedNormFactor(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedNormFactor(UniqueHloInstruction* fused_norm_factor,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2)
.WithPredicate(fused_norm_factor->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
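// Matches the derivative of the norm factor with respect to the variance,
// d(1/sqrt(var + eps))/d(var) = -1/2 * (var + eps)^(-3/2), expressed as
// -0.5 times the cube of the fused norm factor.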
auto DNormFactor(UniqueHloInstruction* custom_call) {
return MultiplyAnyOrder(m::Broadcast(m::ConstantScalar(-0.5)),
Cube(FusedNormFactor(custom_call)));
}
auto XCenter(UniqueHloInstruction* x, UniqueHloInstruction* custom_call,
const NormMetadataMap& norm_metadata) {
auto capture_or_verify_x =
      [x,
#include <string>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/stream_executor/device_description.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
#include "xla/service/gpu/tests/gpu_codegen_test.h"
namespace xla {
namespace gpu {
namespace {
class CudnnNormRewriterTest : public GpuCodegenTest {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cudnn_layer_norm(true);
return debug_options;
}
protected:
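  // Verifies hlo_text against the reference interpreter with a 1e-3 tolerance
  // and checks the optimized HLO against the FileCheck pattern optimized_hlo.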
void TestNorm(std::string hlo_text, std::string optimized_hlo) {
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, optimized_hlo);
}
};
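// The tests below verify that layer norm graphs over various ranks and
// normalization dimensions are rewritten into a single __cudnn$norm custom
// call. The test-name suffix encodes the input rank and the normalized
// dimensions, e.g. LayerNorm4D12 normalizes dimensions {1,2} of a 4D input.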
TEST_F(CudnnNormRewriterTest, LayerNorm2D1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2] reduce(input, c0),dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean, input_mean)
variance = f32[2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input, input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast, input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
ROOT out = f32[2,4] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean, input_mean)
variance = f32[2,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3Degenerate0) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[1,4,6,8] parameter(0)
input_square = f32[1,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[1,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[1,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[1,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[1,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[1,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[1,4,6] multiply(input_mean, input_mean)
variance = f32[1,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[1,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[1,4,6] add(variance, epsilon_bcast)
norm_factor = f32[1,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[1,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[1,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[1,4,6,8] subtract(input, input_mean_bcast)
norm = f32[1,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[1,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[1,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[1,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[1,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[1,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[1,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[24,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,8] multiply(input_mean, input_mean)
variance = f32[2,4,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast)
norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,4,8,6]{3,2,1,0} transpose([[P0]]), dimensions={0,1,3,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2Degenerate1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,1,6,8] parameter(0)
input_square = f32[2,1,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,1,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,1,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,1,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,1,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,1,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,1,8] multiply(input_mean, input_mean)
variance = f32[2,1,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,1,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,1,8] add(variance, epsilon_bcast)
norm_factor = f32[2,1,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,1,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,1,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,1,6,8] subtract(input, input_mean_bcast)
norm = f32[2,1,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,1,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,1,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,1,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,1,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,1,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,1,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[1,2,8,6]{3,2,1,0} transpose([[P0]]), dimensions={1,0,3,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.041667)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4,6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4,6]{3,2,1,0} transpose([[P0]]), dimensions={0,3,1,2}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12Degenerate2) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,1,8] parameter(0)
input_square = f32[2,4,1,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,1,8] subtract(input, input_mean_bcast)
norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,1] parameter(1)
scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast)
bias = f32[4,1] parameter(2)
bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,1,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> f32[2,4,1,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[1,2,8,4]{3,2,1,0} transpose([[P0]]), dimensions={2,0,3,1}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[FUSION:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3IncorrectScaleBroadcast) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,2,2,2] parameter(0)
input_square = f32[2,2,2,2] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,2,2] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.5)
r_nelems_bcast = f32[2,2,2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,2,2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,2,2] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,2,2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,2,2] multiply(input_mean, input_mean)
variance = f32[2,2,2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,2,2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,2,2] add(variance, epsilon_bcast)
norm_factor = f32[2,2,2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,2,2,2] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,2,2,2] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,2,2,2] subtract(input, input_mean_bcast)
norm = f32[2,2,2,2] multiply(norm_factor_bcast, input_center)
scale = f32[2] parameter(1)
scale_bcast = f32[2,2,2,2] broadcast(scale), dimensions={2}
norm_scale = f32[2,2,2,2] multiply(norm, scale_bcast)
bias = f32[2] parameter(2)
bias_bcast = f32[2,2,2,2] broadcast(bias), dimensions={3}
ROOT out = f32[2,2,2,2] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,2,2,2], {{.*}}: f32[2], {{.*}}: f32[2]) -> f32[2,2,2,2] {
; CHECK-NOT: custom_call_target="__cudnn$norm"
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain2D1) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast)
reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(reduce,r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean,input_mean)
variance = f32[2] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input,input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast,input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm,scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4], f32[2], f32[2], f32[2]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> (f32[2,4], f32[2], f32[2], f32[2]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2]{0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain4D3) {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
    input_square_sum = f32[2,4,6] reduce(input_square,
#ifndef XLA_SERVICE_GPU_PIPELINED_P2P_REWRITER_H_
#define XLA_SERVICE_GPU_PIPELINED_P2P_REWRITER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
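// Rewrites pipelined point-to-point (Send/Recv) communication around while
// loops: the Send and Recv ops themselves are threaded through the loop in
// place of their SendDone/RecvDone results, and the done ops are recreated
// inside the body and after the loop, so that the communication can overlap
// with the computation in between. (Summary inferred from the implementation
// below.)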
class PipelinedP2PRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "pipelined-p2p-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using CollectiveInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
using InstructionVector = HloInstruction::InstructionVector;
struct PipelinedP2PInfo {
int64_t opnd_start;
int64_t opnd_end;
};
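// Returns true if the instruction is a collective op. Custom calls are
// conservatively treated as collective here, presumably because they may
// communicate.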
bool IsCollectiveOp(const HloInstruction* op) {
HloOpcode opcode = op->opcode();
if (opcode == HloOpcode::kCustomCall) {
return true;
}
return hlo_query::IsCollectiveCommunicationOp(opcode) ||
opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv;
}
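// Returns true if hlo is a collective op or calls a computation already
// recorded as containing one; the CHECK assumes callees have been analyzed
// before their callers.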
bool MayInvokeCollectiveOp(
const HloInstruction* hlo,
const CollectiveInComputation& collective_in_computation) {
if (IsCollectiveOp(hlo)) {
return true;
}
for (HloComputation* callee : hlo->called_computations()) {
auto collective_in_comp = collective_in_computation.find(callee);
CHECK(collective_in_comp != collective_in_computation.end());
if (collective_in_comp->second) {
return true;
}
}
return false;
}
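// Returns the unique get-tuple-element user of op with tuple index idx, or
// nullptr if there is no such user or more than one.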
HloInstruction* FindUniqueGTEUserWithIndex(const HloInstruction* op,
int64_t idx) {
CHECK(op->shape().IsTuple());
HloInstruction* gte = nullptr;
for (auto user : op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
if (user->tuple_index() == idx) {
if (gte == nullptr) {
gte = user;
} else {
return nullptr;
}
}
}
return gte;
}
bool HasGTEUserWithIndex(const HloInstruction* op, int64_t idx) {
CHECK(op->shape().IsTuple());
for (auto user : op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
if (user->tuple_index() == idx) {
return true;
}
}
return false;
}
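// If op is a tuple whose operands are all get-tuple-elements of one common
// instruction, returns that instruction; otherwise returns op itself. The
// get-tuple-element indices are not checked.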
HloInstruction* MaySkipTrivialTuple(HloInstruction* op) {
if (op->opcode() != HloOpcode::kTuple) {
return op;
}
HloInstruction* hidden_op = nullptr;
for (auto opnd : op->mutable_operands()) {
if (opnd->opcode() != HloOpcode::kGetTupleElement) {
return op;
}
if (hidden_op == nullptr) {
hidden_op = opnd->mutable_operand(0);
} else if (opnd->mutable_operand(0) != hidden_op) {
return op;
}
}
return hidden_op;
}
const HloInstruction* MaySkipTrivialTuple(const HloInstruction* op) {
return MaySkipTrivialTuple(const_cast<HloInstruction*>(op));
}
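// Finds the single consecutive block of SendDone/RecvDone operands of
// while_init that carry the send-recv pipeline attribute and contain equal
// numbers of SendDones and RecvDones. Returns nullopt if the counts are
// unbalanced, if pipelined done ops occur outside the block, or if no such
// block exists.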
std::optional<PipelinedP2PInfo>
FindConsecutiveAndBalanceBlockOfSendDoneRecvDone(
const HloInstruction* while_init) {
PipelinedP2PInfo pipelined_p2p_info{0, 0};
auto has_started = [&]() {
return pipelined_p2p_info.opnd_start != pipelined_p2p_info.opnd_end;
};
int difference = 0;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
const HloInstruction* op = while_init->operand(i);
if ((op->opcode() == HloOpcode::kRecvDone ||
op->opcode() == HloOpcode::kSendDone) &&
op->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {
if (op->opcode() == HloOpcode::kRecvDone) {
difference++;
} else {
difference--;
}
if (!has_started()) {
pipelined_p2p_info.opnd_start = i;
}
pipelined_p2p_info.opnd_end = i + 1;
} else {
if (has_started()) {
VLOG(10) << "End a consecutive block";
break;
}
}
}
if (difference != 0) {
VLOG(10) << "Mismatch number of SendDone and RecvDone: " << difference;
return std::nullopt;
}
if (has_started()) {
for (int64_t i = pipelined_p2p_info.opnd_end;
i < while_init->operand_count(); ++i) {
const HloInstruction* op = while_init->operand(i);
if (op->opcode() == HloOpcode::kRecvDone ||
op->opcode() == HloOpcode::kSendDone) {
VLOG(10) << "SendDone/RecvDone outside the consecutive block";
        return std::nullopt;
}
}
}
if (!has_started()) {
VLOG(10) << "No SendDone/RecvDone in while-init ";
return std::nullopt;
}
return pipelined_p2p_info;
}
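// Checks whether while_op is a pipelined point-to-point loop amenable to the
// rewrite: its init must be a single-use tuple containing the consecutive
// done-op block found above, each RecvDone result must have a unique
// get-tuple-element user on both the while result and the body parameter,
// SendDone results must be unused, and the body root must have ops of the
// same opcodes at the corresponding operand positions.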
std::optional<PipelinedP2PInfo> FindPipelinedP2P(
const HloInstruction* while_op) {
VLOG(10) << "while_op: " << while_op->ToString();
const HloInstruction* while_init = while_op->while_init();
if (while_init->opcode() != HloOpcode::kTuple ||
while_init->user_count() != 1) {
return std::nullopt;
}
const HloComputation* while_body = while_op->while_body();
const HloComputation* while_condition = while_op->while_condition();
if (while_body->num_parameters() != 1 ||
while_condition->num_parameters() != 1) {
return std::nullopt;
}
std::optional<PipelinedP2PInfo> pipelined_p2p_info =
FindConsecutiveAndBalanceBlockOfSendDoneRecvDone(while_init);
if (!pipelined_p2p_info.has_value()) {
return std::nullopt;
}
VLOG(10) << "opnd_start " << pipelined_p2p_info->opnd_start << " opnd_end "
<< pipelined_p2p_info->opnd_end;
for (int64_t i = pipelined_p2p_info->opnd_start;
i < pipelined_p2p_info->opnd_end; ++i) {
const HloInstruction* op = while_init->operand(i);
if (op->opcode() == HloOpcode::kRecvDone) {
if (!FindUniqueGTEUserWithIndex(while_op, i)) {
VLOG(10) << "While result get-tuple-element user with index " << i
<< " not unique";
return std::nullopt;
}
if (!FindUniqueGTEUserWithIndex(while_body->parameter_instruction(0),
i)) {
VLOG(10) << "While-body parameter get-tuple-element user with index "
<< i << " not unique";
return std::nullopt;
}
} else {
CHECK(op->opcode() == HloOpcode::kSendDone);
if (HasGTEUserWithIndex(while_op, i) ||
HasGTEUserWithIndex(while_body->parameter_instruction(0), i)) {
VLOG(10) << "SendDone with index " << i << " has unexpected users";
return std::nullopt;
}
}
}
const HloInstruction* root = while_body->root_instruction();
for (int64_t i = pipelined_p2p_info->opnd_start;
i < pipelined_p2p_info->opnd_end; ++i) {
const HloInstruction* op_init = while_init->operand(i);
const HloInstruction* op_root = root->operand(i);
op_root = MaySkipTrivialTuple(op_root);
if (op_init->opcode() != op_root->opcode()) {
VLOG(10) << "Mismatching opcode, op_init: " << op_init->ToString()
<< " op_root: " << op_root->ToString();
return std::nullopt;
}
}
return pipelined_p2p_info.value();
}
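// Helpers that remove or replace instructions while keeping the computation's
// instruction sequence (the schedule) in sync.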
absl::Status RemoveOpFromParent(HloInstruction* op) {
TF_RETURN_IF_ERROR(op->DropAllControlDeps());
TF_RETURN_IF_ERROR(op->parent()->RemoveInstruction(op));
return absl::OkStatus();
}
absl::Status ReplaceOpInSequence(HloInstruction* old_op, HloInstruction* new_op,
HloInstructionSequence& instruction_sequence) {
VLOG(10) << "old_op: " << old_op->ToString();
VLOG(10) << "new_op: " << new_op->ToString();
instruction_sequence.replace_instruction(old_op, new_op);
return RemoveOpFromParent(old_op);
}
absl::Status ReplaceUsesAndUpdateSequence(
HloInstruction* old_op, HloInstruction* new_op,
HloInstructionSequence& instruction_sequence, bool diff_shape = false) {
VLOG(10) << "old_op: " << old_op->ToString();
VLOG(10) << "new_op: " << new_op->ToString();
if (diff_shape) {
TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWithDifferentShape(new_op));
} else {
TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWith(new_op));
}
return ReplaceOpInSequence(old_op, new_op, instruction_sequence);
}
absl::Status ReplaceUsesAndUpdateSequence(
const InstructionVector& old_ops, const InstructionVector& new_ops,
HloInstructionSequence& instruction_sequence) {
CHECK(old_ops.size() == new_ops.size());
for (int64_t i = 0; i < old_ops.size(); ++i) {
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(old_ops[i], new_ops[i],
instruction_sequence));
}
return absl::OkStatus();
}
absl::Status RemoveDoneOpsAndUpdateSequence(
const InstructionVector& ops,
HloInstructionSequence& instruction_sequence) {
auto remove_op = [&](HloInstruction* op) {
VLOG(10) << "op: " << op->ToString();
TF_RETURN_IF_ERROR(RemoveOpFromParent(op));
instruction_sequence.remove_instruction(op);
return absl::OkStatus();
};
for (auto op : ops) {
if (op->opcode() == HloOpcode::kTuple) {
InstructionVector to_remove;
HloInstruction* tuple_op = op;
op = MaySkipTrivialTuple(tuple_op);
to_remove.push_back(tuple_op);
for (auto opnd : tuple_op->mutable_operands()) {
to_remove.push_back(opnd);
}
for (auto opnd : to_remove) {
TF_RETURN_IF_ERROR(remove_op(opnd));
}
}
TF_RETURN_IF_ERROR(remove_op(op));
}
return absl::OkStatus();
}
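// Inserts ops into the schedule immediately before the first instruction at
// or after idx that may invoke a collective op, updating idx and idx_tot.
// Returns false if no such instruction exists.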
bool InsertBeforeFirstCollectiveOp(
const InstructionVector& ops,
const CollectiveInComputation& collective_in_computation,
HloInstructionSequence& instruction_sequence, int64_t& idx,
int64_t& idx_tot) {
bool inserted = false;
while (idx < idx_tot) {
HloInstruction* hlo = instruction_sequence.instructions()[idx];
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
for (auto op : ops) {
instruction_sequence.insert_instruction(op, idx);
idx++;
idx_tot++;
}
inserted = true;
break;
}
idx++;
}
return inserted;
}
void CopyInstructionInfo(const HloInstruction* old_op, HloInstruction* new_op) {
new_op->set_metadata(old_op->metadata());
new_op->add_frontend_attributes(old_op->frontend_attributes());
new_op->CopyBackendConfigFrom(old_op);
}
HloInstruction* CreateRecvDoneFrom(const HloInstruction* old_recv_done,
HloInstruction* recv,
HloComputation* computation) {
HloInstruction* recv_done =
computation->AddInstruction(HloInstruction::CreateRecvDone(
recv, old_recv_done->channel_id().value()));
CopyInstructionInfo(old_recv_done, recv_done);
return recv_done;
}
HloInstruction* CreateSendDoneFrom(const HloInstruction* old_send_done,
HloInstruction* send,
HloComputation* computation) {
HloInstruction* send_done =
computation->AddInstruction(HloInstruction::CreateSendDone(
send, old_send_done->channel_id().value()));
CopyInstructionInfo(old_send_done, send_done);
return send_done;
}
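// Rewrites the while-body for pipelining: the parameter now carries the Send
// and Recv ops themselves rather than their results, so fresh RecvDones fed
// by the parameter replace the old ones, the done ops feeding the root are
// stripped so the root forwards the Send/Recv ops, and new SendDones are
// scheduled right before the first collective op in the body.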
absl::Status RewritePipelinedP2PWhileBody(
const CollectiveInComputation& collective_in_computation,
const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op,
int64_t opnd_start, int64_t opnd_end) {
HloComputation* computation = while_op->while_body();
HloInstruction* while_init = while_op->while_init();
HloInstruction* root = computation->root_instruction();
HloInstructionSequence& instruction_sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
HloInstruction* param = computation->parameter_instruction(0);
*param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes);
InstructionVector recv_dones;
InstructionVector new_recv_dones;
InstructionVector new_send_dones;
for (int64_t i = opnd_start; i < opnd_end; ++i) {
const HloInstruction* op = root->operand(i);
op = MaySkipTrivialTuple(op);
if (op->opcode() == HloOpcode::kRecvDone) {
HloInstruction* gte = FindUniqueGTEUserWithIndex(param, i);
CHECK(gte != nullptr);
recv_dones.push_back(gte);
HloInstruction* recv = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(param, i));
HloInstruction* recv_done = CreateRecvDoneFrom(op, recv, computation);
new_recv_dones.push_back(recv_done);
continue;
}
CHECK(op->opcode() == HloOpcode::kSendDone);
HloInstruction* send = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(param, i));
HloInstruction* send_done = CreateSendDoneFrom(op, send, computation);
new_send_dones.push_back(send_done);
}
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones,
instruction_sequence));
InstructionVector done_ops;
InstructionVector new_opnds;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
HloInstruction* op = root->mutable_operand(i);
if (i >= opnd_start && i < opnd_end) {
new_opnds.push_back(MaySkipTrivialTuple(op)->mutable_operand(0));
done_ops.push_back(op);
} else {
new_opnds.push_back(op);
}
}
HloInstruction* new_root =
computation->AddInstruction(HloInstruction::CreateTuple(new_opnds));
computation->set_root_instruction(new_root,
true);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(root));
instruction_sequence.replace_instruction(root, new_root);
TF_RETURN_IF_ERROR(
RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence));
int64_t idx = 0;
int64_t idx_end = instruction_sequence.size();
bool inserted =
InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation,
instruction_sequence, idx, idx_end);
CHECK(inserted);
CHECK(idx_end == instruction_sequence.size());
return absl::OkStatus();
}
void RewritePipelinedP2PWhileCond(
const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op) {
HloComputation* computation = while_op->while_condition();
HloInstruction* param = computation->parameter_instruction(0);
*param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes);
VLOG(10) << computation->ToString();
}
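// Rewrites one pipelined while loop: builds a new while-init that passes the
// Send and Recv ops (the operands of the old done ops) through the loop,
// rewrites the condition and body to the new parameter shape, recreates the
// RecvDones and SendDones after the loop, and schedules the new SendDones
// before the next collective op following the loop, or at the end of the
// sequence if there is none.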
absl::Status TransformLoop(
const PipelinedP2PInfo& pipelined_info,
const CollectiveInComputation& collective_in_computation, int64_t& idx,
int64_t& idx_end, HloInstructionSequence& instruction_sequence,
HloInstruction* while_op) {
HloComputation* computation = while_op->parent();
int64_t opnd_start = pipelined_info.opnd_start;
int64_t opnd_end = pipelined_info.opnd_end;
VLOG(10) << "Transform pipelined while-op " << while_op->ToString();
HloInstruction* while_init = while_op->while_init();
InstructionVector new_while_init_opnds;
std::vector<Shape> new_parameter_shapes;
for (int64_t i = 0; i < while_init->operand_count(); ++i) {
HloInstruction* op = while_init->mutable_operand(i);
if (i >= opnd_start && i < opnd_end) {
new_while_init_opnds.push_back(op->mutable_operand(0));
} else {
new_while_init_opnds.push_back(op);
}
new_parameter_shapes.push_back(new_while_init_opnds.back()->shape());
}
RewritePipelinedP2PWhileCond(new_parameter_shapes, while_op);
TF_RETURN_IF_ERROR(RewritePipelinedP2PWhileBody(
collective_in_computation, new_parameter_shapes, while_op, opnd_start,
opnd_end));
HloInstruction* new_while_init = computation->AddInstruction(
HloInstruction::CreateTuple(new_while_init_opnds), "while-init");
VLOG(10) << "new_while_init: " << new_while_init->ToString();
HloInstruction* new_while_op = computation->AddInstruction(
HloInstruction::CreateWhile(
while_op->while_body()->root_instruction()->shape(),
while_op->while_condition(), while_op->while_body(), new_while_init),
"while-result");
CopyInstructionInfo(while_op, new_while_op);
VLOG(10) << "new_while_op: " << new_while_op->ToString();
InstructionVector recv_dones;
InstructionVector new_recv_dones;
InstructionVector new_send_dones;
InstructionVector done_ops;
for (int64_t i = opnd_start; i < opnd_end; ++i) {
HloInstruction* op = while_init->mutable_operand(i);
done_ops.push_back(op);
if (op->opcode() == HloOpcode::kRecvDone) {
HloInstruction* gte = FindUniqueGTEUserWithIndex(while_op, i);
CHECK(gte != nullptr);
recv_dones.push_back(gte);
HloInstruction* recv = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_op, i));
HloInstruction* recv_done = computation->AddInstruction(
HloInstruction::CreateRecvDone(recv, op->channel_id().value()));
new_recv_dones.push_back(recv_done);
CopyInstructionInfo(op, recv_done);
continue;
}
CHECK(op->opcode() == HloOpcode::kSendDone);
HloInstruction* send = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_op, i));
HloInstruction* send_done = computation->AddInstruction(
HloInstruction::CreateSendDone(send, op->channel_id().value()));
new_send_dones.push_back(send_done);
CopyInstructionInfo(op, send_done);
}
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(
while_op, new_while_op, instruction_sequence, true));
TF_RETURN_IF_ERROR(
ReplaceOpInSequence(while_init, new_while_init, instruction_sequence));
TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones,
instruction_sequence));
TF_RETURN_IF_ERROR(
RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence));
int64_t opnd_tot = opnd_end - opnd_start;
CHECK(idx_end == instruction_sequence.size() + opnd_tot);
CHECK(instruction_sequence.instructions()[idx - opnd_tot] == new_while_op);
idx_end -= opnd_tot;
idx = idx - opnd_tot + 1;
bool inserted =
InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation,
instruction_sequence, idx, idx_end);
CHECK(idx_end == instruction_sequence.size());
if (!inserted) {
CHECK(idx_end == idx);
idx--;
for (auto send_done : new_send_dones) {
instruction_sequence.insert_instruction(send_done, idx++);
}
}
return absl::OkStatus();
}
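// Scans a scheduled computation for while-ops with a pipelined Send/Recv
// chain and transforms each one found; also records whether the computation
// may invoke a collective op.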
absl::StatusOr<bool> ProcessComputation(
HloModule* module, HloComputation* computation,
CollectiveInComputation& collective_in_computation) {
VLOG(10) << "Process compuation " << computation->name();
bool changed = false;
HloInstructionSequence& instruction_sequence =
module->schedule().GetOrCreateSequence(computation);
int64_t idx = 0;
int64_t idx_end = instruction_sequence.size();
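  // Iterate by index: TransformLoop edits the sequence in place and updates
  // both idx and idx_end through the references passed to it.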
while (idx < idx_end) {
HloInstruction* hlo = instruction_sequence.instructions()[idx];
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
collective_in_computation[computation] = true;
}
if (hlo->opcode() != HloOpcode::kWhile) {
idx++;
continue;
}
std::optional<PipelinedP2PInfo> pipelined_info = FindPipelinedP2P(hlo);
if (!pipelined_info.has_value()) {
idx++;
continue;
}
TF_RETURN_IF_ERROR(TransformLoop(pipelined_info.value(),
collective_in_computation, idx, idx_end,
instruction_sequence, hlo));
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> PipelinedP2PRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
if (!module->has_schedule()) return changed;
CollectiveInComputation collective_in_computation;
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (computation->IsFusionComputation()) {
collective_in_computation[computation] = false;
continue;
}
TF_ASSIGN_OR_RETURN(
bool cur_changed,
ProcessComputation(module, computation, collective_in_computation));
changed |= cur_changed;
}
if (changed) {
TF_RETURN_IF_ERROR(module->schedule().Update());
}
return changed;
}
}
} | #include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class PipelinedP2pRewriterTest : public HloTestBase {
protected:
void DoFileCheck(const HloModule* module, absl::string_view expected) {
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(options), expected));
EXPECT_TRUE(filecheck_matched);
}
};
TEST_F(PipelinedP2pRewriterTest, SendRecvUnpipelinedNotTransform) {
const char* kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined1) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.1.p = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done.1.p, send-done.1.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
}
)";
const char* kExpected = R"(
CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) {
CHECK: %param.1 = parameter(0)
CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1
CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2
CHECK: %count.1 = get-tuple-element(%param.1), index=0
CHECK: %recv-done = recv-done(%get-tuple-element), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data = get-tuple-element(%recv-done), index=0
CHECK: %c1 = constant(1)
CHECK: %new-count = add(%count.1, %c1)
CHECK: %replica = replica-id()
CHECK: %c10 = constant(10)
CHECK: %sum = add(%replica, %c10)
CHECK: %sum2 = add(%sum, %count.1)
CHECK: %conv = convert(%sum2)
CHECK: %p = broadcast(%conv), dimensions={}
CHECK: %b = add(%p, %recv-data)
CHECK: %c = multiply(%b, %b)
CHECK: %d = tan(%c)
CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
CHECK: %send-data = add(%c, %s)
CHECK: %after-all = after-all()
CHECK: %send-done = send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK: ROOT %tuple = tuple(%new-count, %recv, %send)
CHECK: }
CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] {
CHECK: %param = parameter(0)
CHECK: %count = get-tuple-element(%param), index=0
CHECK: %ub = constant(25)
CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT
CHECK: }
CHECK: ENTRY %main () -> f32[1,1024,1024] {
CHECK: %c0 = constant(0)
CHECK: %f0 = constant(0)
CHECK: %init = broadcast(%f0), dimensions={}
CHECK: %after-all.1 = after-all()
CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK{LITERAL}: %send.1 = send(%init, %after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"}
CHECK: %while-init = tuple(%c0, %recv.1, %send.1)
CHECK: %while-result = while(%while-init), condition=%while-cond, body=%while-body,
CHECK-SAME{LITERAL}: backend_config={"known_trip_count":{"n":"25"}}
CHECK: %get-tuple-element.2 = get-tuple-element(%while-result), index=1
CHECK: %get-tuple-element.3 = get-tuple-element(%while-result), index=2
CHECK: %recv-done.1 = recv-done(%get-tuple-element.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.1 = send-done(%get-tuple-element.3), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: ROOT %entry-result = get-tuple-element(%recv-done.1), index=0
CHECK: })";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
DoFileCheck(module.get(), kExpected);
}
TEST_F(PipelinedP2pRewriterTest, SendRecvTwoPipelinedWhileLoops) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
while-cond-2 {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body-2 {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0
gte.1 = token[] get-tuple-element(recv-done.p), index=1
recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1)
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done-tuple, send-done.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.1.p = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done.1.p, send-done.1.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
after-all-2.1 = token[] after-all()
recv-2.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-2.1 = (f32[1, 1024, 1024], u32[], token[]) send(recv-done.1.q, after-all-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done-2.1.p = (f32[1,1024,1024], token[]) recv-done(recv-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done-2.1.p = token[] send-done(send-2.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init-2.p = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done-2.1.p, send-done-2.1.p)
while-result-2.p = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init-2.p),
body=while-body-2, condition=while-cond-2,
backend_config={"known_trip_count":{"n":"25"}}
recv-done-2.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result-2.p), index=1
ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done-2.1.q), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
}
TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined2) {
const char* kModuleStr = R"(
HloModule test, is_scheduled=true
while-cond {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data.0 = f32[1, 1024, 1024] get-tuple-element(recv-done.0.q), index=0
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=3
recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
recv-data = f32[1, 1024, 1024] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.p = token[] send-done(send), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1.p = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done.p, send-done.p, recv-done.1.p, send-done.1.p)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.2.p = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2.p = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.3 = token[] after-all()
recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
send.3 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.3.p = (f32[1,1024,1024], token[]) recv-done(recv.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.3.p = token[] send-done(send.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
while-init.p = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2.p, send-done.2.p, recv-done.3.p, send-done.3.p)
while-result.p = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) while(while-init.p),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1
recv-data.2 = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
recv-done.3.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=3
recv-data.3 = f32[1, 1024, 1024] get-tuple-element(recv-done.3.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
ROOT entry-result = f32[1, 1024, 1024] select(compare, recv-data.2, recv-data.3)
}
)";
const char* kExpected = R"(
CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) {
CHECK: %param.1 = parameter(0)
CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1
CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2
CHECK: %get-tuple-element.2 = get-tuple-element(%param.1), index=3
CHECK: %get-tuple-element.3 = get-tuple-element(%param.1), index=4
CHECK: %count.1 = get-tuple-element(%param.1), index=0
CHECK: %recv-done = recv-done(%get-tuple-element), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data.0 = get-tuple-element(%recv-done), index=0
CHECK: %recv-done.1 = recv-done(%get-tuple-element.2), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %recv-data.1 = get-tuple-element(%recv-done.1), index=0
CHECK: %replica = replica-id()
CHECK: %constant0 = constant(0)
CHECK: %compare0 = compare(%replica, %constant0), direction=EQ
CHECK: %compare = broadcast(%compare0), dimensions={}
CHECK: %recv-data.2 = select(%compare, %recv-data.0, %recv-data.1)
CHECK: %c1 = constant(1)
CHECK: %new-count = add(%count.1, %c1)
CHECK: %c10 = constant(10)
CHECK: %sum = add(%replica, %c10)
CHECK: %sum2 = add(%sum, %count.1)
CHECK: %conv = convert(%sum2)
CHECK: %p = broadcast(%conv), dimensions={}
CHECK: %b = add(%p, %recv-data.2)
CHECK: %c = multiply(%b, %b)
CHECK: %d = tan(%c)
CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
CHECK: %send-data = add(%c, %s)
CHECK: %after-all = after-all()
CHECK: %send-done = send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.1 = send-done(%get-tuple-element.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK: %after-all.1 = after-all()
CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK{LITERAL}: %send.1 = send(%send-data, %after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK: ROOT %tuple = tuple(%new-count, %recv, %send, %recv.1, %send.1)
CHECK: }
CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] {
CHECK: %param = parameter(0)
CHECK: %count = get-tuple-element(%param), index=0
CHECK: %ub = constant(25)
CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT
CHECK: }
CHECK: ENTRY %main () -> f32[1,1024,1024] {
CHECK: %c0 = constant(0)
CHECK: %f0 = constant(0)
CHECK: %init = broadcast(%f0), dimensions={}
CHECK: %after-all.2 = after-all()
CHECK{LITERAL}: %recv.2 = recv(%after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK{LITERAL}: %send.2 = send(%init, %after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs="{{3,0}}"}
CHECK: %after-all.3 = after-all()
CHECK{LITERAL}: %recv.3 = recv(%after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK{LITERAL}: %send.3 = send(%init, %after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}"}
CHECK: %while-init = tuple(%c0, %recv.2, %send.2, %recv.3, %send.3)
CHECK{LITERAL}: %while-result = while(%while-init), condition=%while-cond, body=%while-body, backend_config={"known_trip_count":{"n":"25"}}
CHECK: %get-tuple-element.4 = get-tuple-element(%while-result), index=1
CHECK: %get-tuple-element.5 = get-tuple-element(%while-result), index=2
CHECK: %get-tuple-element.6 = get-tuple-element(%while-result), index=3
CHECK: %get-tuple-element.7 = get-tuple-element(%while-result), index=4
CHECK: %recv-done.2 = recv-done(%get-tuple-element.4), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %recv-data.3 = get-tuple-element(%recv-done.2), index=0
CHECK: %recv-done.3 = recv-done(%get-tuple-element.6), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: %recv-data.4 = get-tuple-element(%recv-done.3), index=0
CHECK: %replica.1 = replica-id()
CHECK: %constant0.1 = constant(0)
CHECK: %compare0.1 = compare(%replica.1, %constant0.1), direction=EQ
CHECK: %compare.1 = broadcast(%compare0.1), dimensions={}
CHECK: %send-done.2 = send-done(%get-tuple-element.5), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %send-done.3 = send-done(%get-tuple-element.7), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}
CHECK: ROOT %entry-result = select(%compare.1, %recv-data.3, %recv-data.4)
CHECK: })";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
PipelinedP2PRewriter rewriter;
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
EXPECT_TRUE(changed);
DoFileCheck(module.get(), kExpected);
}
}
}
} | 2,055 |
#ifndef XLA_SERVICE_GPU_RUNTIME_INTRINSICS_H_
#define XLA_SERVICE_GPU_RUNTIME_INTRINSICS_H_
#include "absl/strings/string_view.h"
namespace xla {
inline constexpr absl::string_view kXlaGpuAssertCustomCallTag =
    "__xla_gpu_assert";
inline constexpr absl::string_view kNopReturnTokenCustomCallTarget =
    "NopReturnToken";
}
#endif
#include "xla/service/gpu/runtime_intrinsics.h"
#include <cstdint>
#include <string>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
std::string GetGpuPlatformName() {
return absl::AsciiStrToUpper(
PlatformUtil::CanonicalPlatformName("gpu").value());
}
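// Copies the PRED scalar in `buffer` back to the host over the stream
// identified by `stream_handle`, then fails with `error_msg` if the
// predicate is false.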
absl::Status AssertOnGpu(void* stream_handle, void* buffer,
absl::string_view error_msg) {
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(GetGpuPlatformName()));
se::StreamExecutorConfig config;
config.gpu_stream = stream_handle;
TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor,
platform->GetExecutor(config));
se::Stream* stream = executor->FindAllocatedStream(stream_handle);
if (!stream) {
return Internal("Stream not found for: %p", stream_handle);
}
int8_t expected = false;
int64_t byte_size = sizeof(int8_t);
CHECK_EQ(byte_size, ShapeUtil::ByteSizeOfPrimitiveType(PrimitiveType::PRED));
TF_RETURN_IF_ERROR(stream->Memcpy(
&expected, se::DeviceMemoryBase{buffer, static_cast<uint64_t>(byte_size)},
byte_size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (!static_cast<bool>(expected)) {
return Internal("%s", error_msg);
}
return absl::OkStatus();
}
void AssertionCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
absl::Status s =
AssertOnGpu(stream_handle, buffers[0],
absl::string_view{opaque, static_cast<uint64_t>(opaque_len)});
if (!s.ok()) {
auto msg = s.message();
XlaCustomCallStatusSetFailure(status, msg.data(), msg.size());
}
}
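// No-op custom call target; the HLO custom-call that targets it only exists
// to produce a token result.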
void NopReturnTokenCustomCall(void* stream_handle, void** buffers,
const char* opaque, int opaque_len,
XlaCustomCallStatus* status) {
VLOG(1) << "NopReturnTokenCustomCall called.";
}
}
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kXlaGpuAssertCustomCallTag), AssertionCustomCall,
GetGpuPlatformName());
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(
std::string(kNopReturnTokenCustomCallTarget), NopReturnTokenCustomCall,
GetGpuPlatformName());
} | #include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using RuntimeIntrinsicsTest = HloTestBase;
TEST_F(RuntimeIntrinsicsTest, NopReturnTokenWorks) {
constexpr absl::string_view kHloText = R"(
HloModule m
ENTRY e {
constant = u32[2]{0} constant({0, 1})
ROOT nop_return_token = token[] custom-call(constant), custom_call_target="NopReturnToken", custom_call_has_side_effect=true
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(kHloText));
EXPECT_EQ(module->entry_computation()->instruction_count(), 2);
EXPECT_TRUE(Run(std::move(module), false));
}
}
}
} | 2,056 |
#ifndef XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#define XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class ReductionLayoutNormalizer : public HloModulePass {
public:
absl::string_view name() const override {
return "reduction-layout-normalizer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_layout_normalizer.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
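// Rewrites reduce ops so that each input's logical dimension order matches
// its physical (minor-to-major) layout: inputs are bitcast to a
// physically-ordered shape, the reduced dimensions are remapped, and the
// result is bitcast back to the original output shape when needed.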
class EnforceMinorToMajorReduceOpVisitor : public DfsHloRewriteVisitor {
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(5) << "Input: " << reduce->ToString();
int operand_idx = -1;
absl::InlinedVector<HloInstruction *, 2> canonical_reduce_inputs;
absl::InlinedVector<Shape, 2> new_reduce_shapes;
DimensionVector out_reduce_dimensions;
const Shape &first_instruction_shape = reduce->inputs()[0]->shape();
for (HloInstruction *operand : reduce->inputs()) {
operand_idx++;
if (operand_idx != 0 &&
operand->shape().layout() != first_instruction_shape.layout()) {
HloInstruction *copy =
reduce->parent()->AddInstruction(HloInstruction::CreateUnary(
operand->shape(), HloOpcode::kCopy, operand));
LayoutUtil::ClearLayout(copy->mutable_shape());
TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
first_instruction_shape, copy->mutable_shape()));
copy->set_metadata(operand->metadata());
operand = copy;
VLOG(3) << "Copying to establish consistent inputs layout: "
<< copy->ToString();
}
const Shape &operand_shape = operand->shape();
const Layout &operand_layout = operand_shape.layout();
const Shape &reduce_shape =
reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx)
: reduce->shape();
DimensionVector new_reduce_dimensions;
DimensionVector new_operand_shape_data;
DimensionVector new_reduce_shape_data;
DimensionVector new_reduce_shape_layout(reduce_shape.rank());
std::vector<int64_t> reduce_shape_logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(reduce_shape.layout());
auto to_reduce_logical_dim = [&](int64_t op_logical_dim) {
return op_logical_dim -
absl::c_count_if(reduce->dimensions(), [&](int64_t dim) {
CHECK(dim != op_logical_dim);
return dim < op_logical_dim;
});
};
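      // Visit the operand dimensions from most-major to most-minor physical
      // order, so the rewritten operand shape is physically ordered.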
for (int i = 0; i < operand_shape.rank(); i++) {
int64_t major_to_minor_dim_idx = operand_shape.rank() - i - 1;
int64_t logical_dim =
operand_layout.minor_to_major(major_to_minor_dim_idx);
int64_t dim_size = operand_shape.dimensions(logical_dim);
VLOG(5) << "Processing logical dimension " << logical_dim << " of size "
<< dim_size;
new_operand_shape_data.push_back(dim_size);
if (absl::c_linear_search(reduce->dimensions(), logical_dim)) {
new_reduce_dimensions.push_back(i);
} else {
new_reduce_shape_data.push_back(dim_size);
int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim);
int64_t physical_reduce_dim =
reduce_shape_logical_to_physical[logical_reduce_dim];
VLOG(5) << "logical_reduce_dim = " << logical_reduce_dim << ", "
<< "physical_reduce_dim = " << physical_reduce_dim;
new_reduce_shape_layout[reduce_shape.rank() - physical_reduce_dim -
1] = new_reduce_shape_data.size() - 1;
}
}
Shape new_operand_shape = ShapeUtil::MakeShape(
operand_shape.element_type(), new_operand_shape_data);
Shape new_reduce_shape = ShapeUtil::MakeShapeWithDenseLayout(
reduce_shape.element_type(), new_reduce_shape_data,
new_reduce_shape_layout);
if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) {
return absl::OkStatus();
}
HloInstruction *canonical_reduce_input =
new_operand_shape != operand_shape
? reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(new_operand_shape, operand))
: operand;
canonical_reduce_input->set_metadata(operand->metadata());
VLOG(5) << "Reduction input: " << canonical_reduce_input->ToString();
new_reduce_shapes.push_back(new_reduce_shape);
canonical_reduce_inputs.push_back(canonical_reduce_input);
if (out_reduce_dimensions.empty()) {
out_reduce_dimensions = new_reduce_dimensions;
} else {
TF_RET_CHECK(out_reduce_dimensions == new_reduce_dimensions);
}
}
Shape new_reduce_shape = ShapeUtil::MakeMaybeTupleShape(new_reduce_shapes);
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
new_reduce_shape, canonical_reduce_inputs, reduce->init_values(),
out_reduce_dimensions, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
const Shape &orig_reduce_shape = reduce->shape();
if (new_reduce_shape != orig_reduce_shape) {
HloInstruction *wrapped_reduce =
reduce->parent()->AddInstruction(std::move(new_reduce));
if (!new_reduce_shape.IsTuple()) {
new_reduce =
HloInstruction::CreateBitcast(reduce->shape(), wrapped_reduce);
} else {
absl::InlinedVector<HloInstruction *, 2> out;
for (int oidx = 0; oidx < reduce->input_count(); oidx++) {
HloInstruction *gte = reduce->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
out.push_back(
reduce->parent()->AddInstruction(HloInstruction::CreateBitcast(
orig_reduce_shape.tuple_shapes(oidx), gte)));
}
new_reduce = HloInstruction::CreateTuple(out);
}
}
VLOG(5) << "Generated output: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionLayoutNormalizer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
EnforceMinorToMajorReduceOpVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_layout_normalizer.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionLayoutNormalizerTest : public HloTestBase {
public:
void CheckReductionLayoutNormalizer(
absl::string_view hlo, std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionLayoutNormalizer{}, expected);
}
};
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTest) {
const char* hlo = R"(
HloModule ReduceWithLayoutChange
add {
x0 = f32[] parameter(0)
y0 = f32[] parameter(1)
ROOT add0 = f32[] add(x0, y0)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
constant0 = f32[] constant(0)
ROOT reduce0 = f32[4,5,16,12,12]{4,3,2,1,0} reduce(arg0, constant0),
dimensions={1,6,7}, to_apply=add
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadic) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadic
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
idxs = u32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[4,5,16,12,12]{4,3,2,1,0},
u32[4,5,16,12,12]{4,3,2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={1,6,7}, to_apply=argmax
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
}
TEST_F(ReductionLayoutNormalizerTest,
LayoutCanonicalizerTestVariadicDifferentLayouts) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,7]{2,1,0,3} parameter(0)
idxs = u32[2,3,4,7]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{2,1,0},
u32[2,3,4]{2,1,0}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
CheckReductionLayoutNormalizer(hlo,
R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-5, 1e-5}));
}
}
} | 2,057 |
#ifndef XLA_SERVICE_GPU_GPU_SANITIZE_CONSTANT_NAMES_H_
#define XLA_SERVICE_GPU_GPU_SANITIZE_CONSTANT_NAMES_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class GpuSanitizeConstantNames : public HloModulePass {
public:
absl::string_view name() const override { return "sanitize-constant-names"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/name_uniquer.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> GpuSanitizeConstantNames::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
NameUniquer instr_name_uniquer("_");
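  // First pass: reserve the names of all non-constant instructions so the
  // sanitized constant names chosen below cannot collide with them.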
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kConstant) {
continue;
}
instr_name_uniquer.GetUniqueName(instr->name());
}
}
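  // Second pass: rename each constant to its sanitized, buffer-assignment
  // friendly name and uniquify it against the reserved names.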
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kConstant) {
continue;
}
std::string sanitized_name = llvm_ir::SanitizeConstantName(*instr);
instr->SetAndSanitizeName(sanitized_name);
instr->UniquifyName(&instr_name_uniquer);
module->instruction_name_uniquer().GetUniqueName(instr->name());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using SanitizeConstantNamesTest = HloTestBase;
TEST_F(SanitizeConstantNamesTest, InstructionNameWithHyphenSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal-to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, InstructionNameWithDotSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, NewInstructionNameRegisteredWithModule) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
auto constant_instr =
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1));
constant_instr->SetAndSanitizeName("equal_to");
module->entry_computation()->AddInstruction(std::move(constant_instr));
EXPECT_THAT(FindInstruction(module.get(), "equal_to.1"),
GmockMatch(m::Constant()));
}
TEST_F(SanitizeConstantNamesTest, BufferSanitizedNameCollisionResolved) {
const char *const kHloString = R"(
HloModule BufferSanitizedName
ENTRY kernelEntry {
equal.to = s32[2]{0} constant({42, 73})
equal-to = s32[2]{0} constant({67, 3})
ROOT equal_to = s32[2]{0} add(equal.to, equal-to)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(GpuSanitizeConstantNames().Run(module.get()).value());
EXPECT_THAT(FindInstruction(module.get(), "equal_to_1"),
GmockMatch(m::Constant()));
EXPECT_THAT(FindInstruction(module.get(), "equal_to_2"),
GmockMatch(m::Constant()));
}
}
}
} | 2,058 |
#ifndef XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#define XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class DotDimensionSorter : public HloModulePass {
public:
absl::string_view name() const override { return "dot_dimension_sorter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/dot_dimension_sorter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
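// Applies the same permutation to both contracting-dimension lists so that
// one of them (the side that is consecutive if sorted) becomes ascending
// while lhs/rhs contracting-dimension pairs stay aligned.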
absl::Status SortDotDimensions(HloDotInstruction* dot) {
const DotDimensionNumbers& dims = dot->dot_dimension_numbers();
DotDimensionNumbers new_dims(dims);
new_dims.clear_lhs_contracting_dimensions();
new_dims.clear_rhs_contracting_dimensions();
const bool sort_by_lhs =
DistinctNumbersAreConsecutiveIfSorted(dims.lhs_contracting_dimensions());
const absl::Span<const int64_t>& sort_key =
sort_by_lhs ? dims.lhs_contracting_dimensions()
: dims.rhs_contracting_dimensions();
std::vector<int64_t> permutation;
for (const int64_t a : sort_key) {
permutation.push_back(a - *absl::c_min_element(sort_key));
}
const std::vector<int64_t> sorted_lhs =
Permute(dims.lhs_contracting_dimensions(), permutation);
*new_dims.mutable_lhs_contracting_dimensions() = {sorted_lhs.begin(),
sorted_lhs.end()};
const std::vector<int64_t> sorted_rhs =
Permute(dims.rhs_contracting_dimensions(), permutation);
*new_dims.mutable_rhs_contracting_dimensions() = {sorted_rhs.begin(),
sorted_rhs.end()};
std::unique_ptr<HloInstruction> new_dot = HloInstruction::CreateDot(
dot->shape(), dot->mutable_operand(0), dot->mutable_operand(1), new_dims,
dot->precision_config(), {dot->sparsity().begin(), dot->sparsity().end()},
absl::MakeSpan(dot->operands()).subspan(HloDotInstruction::kOperands));
dot->SetupDerivedInstruction(new_dot.get());
VLOG(3) << "Sorted dot() dimensions:\n"
<< "\t before: " << dot->ToString() << "\n"
<< "\t after: " << new_dot->ToString();
return dot->parent()->ReplaceWithNewInstruction(dot, std::move(new_dot));
}
}
absl::StatusOr<bool> DotDimensionSorter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> dots_to_process;
for (const HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kDot) {
continue;
}
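      // Only dots whose operand layouts (when present) are monotonic with
      // dimension 0 major are considered.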
if ((instr->operand(0)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(0)->shape().layout())) ||
(instr->operand(1)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(1)->shape().layout()))) {
continue;
}
const DotDimensionNumbers& dims = instr->dot_dimension_numbers();
if (dims.lhs_contracting_dimensions_size() == 0) {
continue;
}
const bool cons_lhs = DistinctNumbersAreConsecutiveIfSorted(
dims.lhs_contracting_dimensions());
const bool cons_rhs = DistinctNumbersAreConsecutiveIfSorted(
dims.rhs_contracting_dimensions());
const bool sorted_lhs =
absl::c_is_sorted(dims.lhs_contracting_dimensions());
const bool sorted_rhs =
absl::c_is_sorted(dims.rhs_contracting_dimensions());
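      // Rewrite only when a consecutive-if-sorted side is currently
      // unsorted, and not when the other side is consecutive and already
      // sorted (the shared permutation would perturb it).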
if ((cons_lhs && !sorted_lhs && !cons_rhs) ||
(cons_rhs && !sorted_rhs && !cons_lhs) ||
(cons_lhs && !sorted_lhs && cons_rhs && !sorted_rhs)) {
dots_to_process.push_back(instr);
}
}
}
if (dots_to_process.empty()) {
return false;
}
for (HloInstruction* dot : dots_to_process) {
TF_RETURN_IF_ERROR(SortDotDimensions(Cast<HloDotInstruction>(dot)));
}
return true;
}
}
} | #include "xla/service/gpu/dot_dimension_sorter.h"
#include <memory>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class WithoutDotDimensionSorterTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("dot_dimension_sorter");
return debug_options;
}
};
TEST_F(WithoutDotDimensionSorterTest, UnsortedDimsCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, SortedDimsDoNotCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, DimOrderCanBeChanged) {
const char* hlo_text_ref = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
const char* hlo_text_modified = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_ref, hlo_text_modified,
ErrorSpec{1e-5, 1e-3},
true));
}
using DotDimensionSorterTest = GpuCodegenTest;
TEST_F(DotDimensionSorterTest, SortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
const auto& dims =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims.lhs_contracting_dimensions(0), 3);
EXPECT_EQ(dims.lhs_contracting_dimensions(1), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(1), 1);
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
const auto& dims2 =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims2.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims2.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dims2.rhs_contracting_dimensions(0), 1);
EXPECT_EQ(dims2.rhs_contracting_dimensions(1), 2);
}
TEST_F(DotDimensionSorterTest, NothingToReorder) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionSorterTest, SparseDotSortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,16] parameter(0)
p1 = f16[122,96,32] parameter(1)
meta = u16[1,144,96,2] parameter(2)
ROOT _ = f16[1,144,122] dot(p0, p1, meta), sparsity=L.3@2:4,
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
HloDotInstruction* dot = DynCast<HloDotInstruction>(
module->entry_computation()->root_instruction());
EXPECT_TRUE(dot != nullptr && dot->sparse_operands() == 1);
}
}
}
} | 2,059 |
#ifndef XLA_SERVICE_GPU_NVPTX_COMPILER_H_
#define XLA_SERVICE_GPU_NVPTX_COMPILER_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/IR/Module.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/xla.pb.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
void WarnIfBadDriverJITVersion();
class NVPTXCompiler : public GpuCompiler {
public:
NVPTXCompiler();
int32_t GetToolkitVersion() const override;
absl::Status OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) override;
absl::Status OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) override;
bool RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) override;
absl::Status AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config,
tsl::thread::ThreadPool* thread_pool) override;
absl::Status AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) override;
absl::Status AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) override;
absl::Status RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) override;
HloDataflowAnalysis::CanShareBuffer GetCanShareBuffer() const override;
absl::StatusOr<BackendCompileResult> CompileTargetBinary(
const HloModuleConfig& module_config, llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version, bool relocatable,
const HloModule* debug_module, const CompileOptions& options) override;
enum class LinkingMethod {
kNone,
kNvLink,
kDriver,
};
absl::StatusOr<bool> CanUseLinkModules(
const HloModuleConfig& module_config) override;
private:
absl::StatusOr<std::vector<uint8_t>> LinkModules(
se::StreamExecutor* stream_exec,
std::vector<std::vector<uint8_t>> modules,
const DebugOptions& debug_options) override;
absl::Mutex mutex_;
absl::flat_hash_map<std::string, LinkingMethod> linking_methods_
ABSL_GUARDED_BY(mutex_);
absl::StatusOr<LinkingMethod> ChooseLinkingMethod(
const DebugOptions& debug_options);
absl::StatusOr<std::vector<uint8_t>> CompileGpuAsmOrGetCachedResult(
const std::string& ptx, se::CudaComputeCapability cc,
const HloModuleConfig& hlo_module_config, absl::string_view module_name,
bool relocatable, const CompileOptions& options);
struct CompilationCacheFlags {
template <typename H>
friend H AbslHashValue(H h, const CompilationCacheFlags& flags) {
return H::combine(std::move(h),
flags.filter_kernels_spilling_registers_on_autotuning);
}
friend bool operator==(const CompilationCacheFlags& a,
const CompilationCacheFlags& b) {
return a.filter_kernels_spilling_registers_on_autotuning ==
b.filter_kernels_spilling_registers_on_autotuning;
}
bool filter_kernels_spilling_registers_on_autotuning;
};
struct CompilationCacheKey {
CompilationCacheKey(std::string ptx, int cc_major, int cc_minor,
bool relocatable, CompilationCacheFlags flags)
: ptx(std::move(ptx)),
cc_major(cc_major),
cc_minor(cc_minor),
relocatable(relocatable),
flags(std::move(flags)) {}
template <typename H>
friend H AbslHashValue(H h, const CompilationCacheKey& key) {
return H::combine(std::move(h), key.ptx, key.cc_major, key.cc_minor,
key.relocatable, key.flags);
}
friend bool operator==(const CompilationCacheKey& a,
const CompilationCacheKey& b) {
return a.cc_major == b.cc_major && a.cc_minor == b.cc_minor &&
a.ptx == b.ptx && a.relocatable == b.relocatable &&
a.flags == b.flags;
}
std::string ptx;
int cc_major;
int cc_minor;
bool relocatable;
CompilationCacheFlags flags;
};
struct CompilationCacheValue {
bool compilation_done = false;
absl::StatusOr<std::vector<uint8_t>> maybe_cubin;
absl::Mutex mutex;
absl::CondVar compilation_done_cv;
};
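  // Illustrative sketch of the intended cache protocol (details such as the
  // locking of mutex_ around the map itself are elided): the first thread to
  // insert a key compiles and publishes the result; racing threads block on
  // the entry's condition variable.
  //
  //   auto [it, inserted] = compilation_cache_.try_emplace(key);
  //   CompilationCacheValue& entry = it->second;
  //   absl::MutexLock lock(&entry.mutex);
  //   if (inserted) {
  //     entry.maybe_cubin = ...;  // compile the PTX here (step elided)
  //     entry.compilation_done = true;
  //     entry.compilation_done_cv.SignalAll();
  //   } else {
  //     while (!entry.compilation_done) {
  //       entry.compilation_done_cv.Wait(&entry.mutex);
  //     }
  //   }
  //
  // absl::node_hash_map (unlike flat_hash_map) keeps entries at stable
  // addresses, so blocking on a reference into the map stays safe while
  // other threads insert.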
absl::node_hash_map<CompilationCacheKey, CompilationCacheValue>
compilation_cache_ ABSL_GUARDED_BY(mutex_);
NVPTXCompiler(const NVPTXCompiler&) = delete;
NVPTXCompiler& operator=(const NVPTXCompiler&) = delete;
};
}
}
#endif
#include "xla/service/gpu/nvptx_compiler.h"
#include <array>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/call_once.h"
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/call_inliner.h"
#include "xla/service/convert_mover.h"
#include "xla/service/dot_dimension_merger.h"
#include "xla/service/dump.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/buffer_sharing.h"
#include "xla/service/gpu/conv_algorithm_picker.h"
#include "xla/service/gpu/cublas_pad_for_gemms.h"
#include "xla/service/gpu/cublas_padding_requirements.h"
#include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include "xla/service/gpu/cudnn_fused_mha_transpose_fusion.h"
#include "xla/service/gpu/cudnn_fusion_compiler.h"
#include "xla/service/gpu/cudnn_norm_rewriter.h"
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include "xla/service/gpu/cudnn_simplify_padding.h"
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include "xla/service/gpu/cudnn_workspace_rewriter.h"
#include "xla/service/gpu/cusolver_rewriter.h"
#include "xla/service/gpu/dot_sparsity_rewriter.h"
#include "xla/service/gpu/gemm_algorithm_picker.h"
#include "xla/service/gpu/gemm_fusion_autotuner.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/gpu_sort_rewriter.h"
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/move_copy_to_users.h"
#include "xla/service/gpu/target_constants.h"
#include "xla/service/gpu/triangular_solve_rewriter.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#include "xla/stream_executor/cuda/cuda_diagnostics.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/cuda/ptx_compiler.h"
#include "xla/stream_executor/cuda/ptx_compiler_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/asm_compiler.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace gpu {
namespace {
class ConvBfloat16Support : public FloatSupport {
public:
explicit ConvBfloat16Support(
se::dnn::VersionInfo cudnn_version,
se::CudaComputeCapability cuda_compute_capability)
: FloatSupport(BF16),
is_conv_bf16_supported_((cudnn_version.major_version() > 8 ||
(cudnn_version.major_version() == 8 &&
cudnn_version.minor_version() >= 2)) &&
cuda_compute_capability.IsAtLeast(
se::CudaComputeCapability::AMPERE)) {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kConvolution);
}
private:
bool is_conv_bf16_supported_;
};
class MatmulBfloat16Support : public FloatSupport {
public:
explicit MatmulBfloat16Support(
se::CudaComputeCapability cuda_compute_capability)
: FloatSupport(BF16),
is_matmul_bf16_supported_(cuda_compute_capability.IsAtLeast(
se::CudaComputeCapability::AMPERE)) {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return true;
}
private:
bool is_matmul_bf16_supported_;
};
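// FloatNormalization uses these FloatSupport policies to decide whether a
// bf16 op may stay in bf16 or must be rewritten through f32. For example, on
// a pre-Ampere GPU a bf16 dot is upcast roughly as:
//   dot(bf16 a, bf16 b)  ==>  convert_bf16(dot(convert_f32(a), convert_f32(b)))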
}
int32_t NVPTXCompiler::GetToolkitVersion() const { return CUDA_VERSION; }
absl::Status NVPTXCompiler::OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) {
auto cuda_compute_capability =
std::get<se::CudaComputeCapability>(gpu_version);
HloPassPipeline pipeline("conv_canonicalization");
pipeline.AddInvariantCheckerDebug<HloVerifier>(
      /*layout_sensitive=*/false,
      /*allow_mixed_precision=*/false);
ConvBfloat16Support conv_bf16_support(dnn_version, cuda_compute_capability);
pipeline.AddPass<FloatNormalization>(&conv_bf16_support);
MatmulBfloat16Support matmul_bf16_support(cuda_compute_capability);
pipeline.AddPass<FloatNormalization>(&matmul_bf16_support);
pipeline.AddPass<GpusolverRewriter>();
pipeline.AddPass<GpuConvRewriter>(cuda_compute_capability);
pipeline.AddPass<CudnnFusedConvRewriter>(cuda_compute_capability, dnn_version,
GetToolkitVersion());
pipeline.AddPass<GpuConvPaddingLegalization>();
pipeline.AddPass<CudnnPadForConvolutions>(cuda_compute_capability);
pipeline.AddPass<CudnnVectorizeConvolutions>(cuda_compute_capability,
dnn_version);
pipeline.AddPass<CallInliner>();
pipeline.AddPass<TupleSimplifier>();
AlgebraicSimplifierOptions algsimp_options =
GetAlgebraicSimplifierOptions(hlo_module->config());
algsimp_options.set_enable_conv_operand_swap(false);
algsimp_options.set_enable_unconditional_reduce_of_concat_replacement(false);
pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(algsimp_options,
gpu_version);
pipeline.AddPass<CudnnSimplifyPadding>();
[&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>(
"reshape_mover_after_conv_canonicalization")] {
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
pipeline.AddPass<ReshapeMover>(reshape_mover_options);
pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version);
}();
[&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>(
"simplify_after_conv_canonicalization")] {
pipeline.AddPass<ConvertMover>();
pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version);
}();
pipeline.AddPass<HloConstantFolding>();
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
absl::Status NVPTXCompiler::OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool) {
auto cuda_compute_capability = std::get<se::CudaComputeCapability>(
gpu_target_config.device_description.gpu_compute_capability());
if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_fmha()) {
HloPassPipeline mha_fusion_pipeline(
"nvptx cudnn multi-headed attention fusion");
AlgebraicSimplifierOptions alg_sim_options =
GetAlgebraicSimplifierOptions(hlo_module->config());
alg_sim_options.set_supports_non_canonical_dots(false);
alg_sim_options.set_is_layout_sensitive(true);
alg_sim_options.set_enable_conv_operand_swap(false);
alg_sim_options.set_minmax_propagate_nan(
!hlo_module->config().debug_options().xla_gpu_enable_fast_min_max());
alg_sim_options.set_enable_unconditional_reduce_of_concat_replacement(
false);
    mha_fusion_pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
se::GpuComputeCapability gpu_version =
gpu_target_config.device_description.gpu_compute_capability();
mha_fusion_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
alg_sim_options, gpu_version);
    mha_fusion_pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
if (stream_exec) {
mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>(
cuda_compute_capability, stream_exec);
} else {
mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>(
cuda_compute_capability, gpu_target_config.dnn_version_info);
}
mha_fusion_pipeline.AddPass<GpuAlgebraicSimplifier>(alg_sim_options,
gpu_version);
mha_fusion_pipeline.AddPass<CudnnFusedMHATransposeFusion>();
mha_fusion_pipeline.AddPass<HloDCE>();
    mha_fusion_pipeline.AddPass<HloCSE>(/*is_layout_sensitive=*/true);
TF_RETURN_IF_ERROR(mha_fusion_pipeline.Run(hlo_module).status());
}
HloPassPipeline pre_pipeline("nvptx post-layout_assignment part 1");
if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_layer_norm()) {
pre_pipeline.AddPass<CudnnNormRewriter>(cuda_compute_capability);
}
pre_pipeline.AddPass<DotDimensionMerger>();
pre_pipeline.AddPass<DotSparsityRewriter>();
for (const CublasPaddingRequirement& requirement :
CublasPaddingRequirements) {
if (cuda_compute_capability.IsAtLeast(requirement.min_compute_capability)) {
pre_pipeline.AddPass<CublasPadForGemms>(cuda_compute_capability,
requirement.data_type,
requirement.multiple_of);
}
}
pre_pipeline.AddPass<HloConstantFolding>();
TF_RETURN_IF_ERROR(pre_pipeline.Run(hlo_module).status());
TF_RETURN_IF_ERROR(GpuCompiler::OptimizeHloPostLayoutAssignment(
hlo_module, stream_exec, options, gpu_target_config, thread_pool));
HloPassPipeline post_pipeline("nvptx post-layout_assignment part 2");
post_pipeline.AddPass<TriangularSolveRewriter>();
if (stream_exec) {
post_pipeline.AddPass<CuDnnWorkspaceRewriter>(*stream_exec);
}
TF_RETURN_IF_ERROR(post_pipeline.Run(hlo_module).status());
return absl::OkStatus();
}
bool NVPTXCompiler::RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) {
if (stream_exec == nullptr || !GpuConvAlgorithmPicker::IsEnabled(module)) {
return false;
}
for (const HloComputation* comp : module->MakeNonfusionComputations()) {
for (const HloInstruction* inst : comp->instructions()) {
if (GpuConvAlgorithmPicker::IsCandidate(inst)) {
return true;
}
}
}
return false;
}
absl::Status NVPTXCompiler::AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool) {
if (GpuConvAlgorithmPicker::IsEnabled(hlo_module)) {
pipeline->AddPass<GpuConvAlgorithmPicker>(autotune_config);
}
pipeline->AddPass<GemmAlgorithmPicker>(autotune_config);
return absl::OkStatus();
}
absl::Status NVPTXCompiler::AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) {
pipeline->AddPass<GemmFusionAutotuner>(autotune_config, GetToolkitVersion(),
thread_pool, key_value_store);
return absl::OkStatus();
}
absl::Status NVPTXCompiler::AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) {
if (debug_options.xla_gpu_enable_cub_radix_sort()) {
pipeline->AddPass<GpuSortRewriter>();
}
return absl::OkStatus();
}
absl::Status NVPTXCompiler::RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaCompileCudnnFusion:#module=%s,program_id=%d#",
module->name(), module->unique_id());
});
CuDnnFusionCompiler cudnn_compiler(*stream_exec, *dnn_compiled_graphs);
return cudnn_compiler.Run(module).status();
}
namespace {
bool MaybeLoadPtxFromFile(const HloModuleConfig& module_config,
const HloModule* module, std::string* ptx) {
std::string prefix = xla::FilenameFor(*module, "", *ptx);
std::string matched_filename;
for (const std::string& full_filename :
module_config.debug_options().xla_gpu_ptx_file()) {
auto filename = tsl::io::Basename(full_filename);
if (absl::StartsWith(filename, prefix)) {
matched_filename = full_filename;
VLOG(1) << "RunBackend() - Will load PTX from file: " << full_filename;
break;
}
}
if (!module_config.debug_options().xla_gpu_ptx_file().empty() &&
matched_filename.empty()) {
VLOG(1) << "RunBackend() - For module with prefix '" << prefix
<< "', we did not found a PTX file to load.";
}
if (!matched_filename.empty()) {
std::ifstream ifs(matched_filename, std::ifstream::in);
*ptx = std::string(std::istreambuf_iterator<char>(ifs),
std::istreambuf_iterator<char>());
CHECK(!ptx->empty()) << "Empty or non existing PTX file: "
<< matched_filename;
return true;
}
return false;
}
std::unique_ptr<llvm::Module> MaybeLoadLLVMFromFile(const HloModule* module,
llvm::Module* llvm_module) {
if (module == nullptr) {
return nullptr;
}
std::string prefix = xla::FilenameFor(*module, "", "");
auto xla_gpu_llvm_ir_file =
module->config().debug_options().xla_gpu_llvm_ir_file();
auto matched_filename = absl::c_find_if(
xla_gpu_llvm_ir_file, [prefix](const std::string& full_filename) {
return absl::StartsWith(tsl::io::Basename(full_filename), prefix);
});
if (!xla_gpu_llvm_ir_file.empty() &&
matched_filename == std::end(xla_gpu_llvm_ir_file)) {
VLOG(1) << "RunBackend() - For module with prefix '" << prefix
<< "', we did not found a LLVM file to load.";
}
if (matched_filename != std::end(xla_gpu_llvm_ir_file)) {
VLOG(1) << "RunBackend() - Will load LLVM from file: " << *matched_filename;
llvm::LLVMContext& context = llvm_module->getContext();
llvm::SMDiagnostic err;
std::unique_ptr<llvm::Module> loaded_module =
llvm::parseIRFile(*matched_filename, err, context);
if (!loaded_module) {
err.print("ERR", llvm::errs());
LOG(FATAL) << "Failed to load an LLVM file. It is probably invalid LLVM.";
}
llvm_ir::DumpIrIfEnabled(*module, *loaded_module, false);
return loaded_module;
}
return nullptr;
}
}
void WarnIfBadDriverJITVersion() {
static absl::once_flag run_once;
absl::call_once(run_once, [] {
auto version_or_status = se::cuda::Diagnostician::FindKernelDriverVersion();
if (!version_or_status.ok()) {
LOG(WARNING) << "Couldn't read CUDA driver version.";
return;
}
se::cuda::DriverVersion version = version_or_status.value();
if (version < std::make_tuple(396, 20, 0)) {
LOG(WARNING)
<< "*** WARNING *** Invoking the PTX->SASS JIT from driver version "
<< se::cuda::DriverVersionToString(version)
<< ", which is older than 396.20.0. These versions are known to "
"miscompile XLA code, leading to incorrect results or "
"invalid-address errors.\nXLA only uses the driver JIT if it "
"cannot find ptxas; you don't need to update your driver if "
"you can point XLA to ptxas 9.2.88 or newer.";
}
});
}
NVPTXCompiler::NVPTXCompiler()
: GpuCompiler(stream_executor::cuda::kCudaPlatformId, nvptx::TargetTriple(),
nvptx::DataLayout()) {}
HloDataflowAnalysis::CanShareBuffer NVPTXCompiler::GetCanShareBuffer() const {
return &CanShareBufferHint;
}
absl::StatusOr<GpuCompiler::BackendCompileResult>
NVPTXCompiler::CompileTargetBinary(const HloModuleConfig& module_config,
llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version,
bool relocatable,
const HloModule* debug_module,
const CompileOptions& options) {
std::unique_ptr<llvm::Module> loaded_module =
MaybeLoadLLVMFromFile(debug_module, llvm_module);
llvm::Module* selected_module = nullptr;
if (loaded_module) {
selected_module = loaded_module.get();
} else {
selected_module = llvm_module;
}
std::string ptx;
if (!(debug_module &&
MaybeLoadPtxFromFile(module_config, debug_module, &ptx))) {
XLA_SCOPED_LOGGING_TIMER_IF(
absl::StrCat(
"NVPTXCompiler::CompileTargetBinary - CompileToPtx for ",
(debug_module != nullptr ? debug_module->name() : "(unknown")),
!options.is_autotuning_compilation);
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
TF_ASSIGN_OR_RETURN(ptx,
nvptx::CompileToPtx(selected_module, gpu_version,
module_config.debug_options()));
uint64_t end_usecs = tsl::Env::Default()->NowMicros();
RecordLlvmPassesAndLlvmToPtxDuration(end_usecs - start_usecs);
}
absl::StatusOr<std::vector<uint8_t>> maybe_cubin =
CompileGpuAsmOrGetCachedResult(
ptx, std::get<se::CudaComputeCapability>(gpu_version), module_config,
(debug_module != nullptr ? debug_module->name() : "(unknown)"),
relocatable, options);
if (!maybe_cubin.ok()) {
return maybe_cubin.status();
}
return BackendCompileResult{std::move(ptx), std::move(maybe_cubin.value())};
}
static absl::StatusOr<std::vector<uint8_t>> AssembleOptionsAndCompile(
const std::string& ptx, se::CudaComputeCapability cc,
const HloModuleConfig& hlo_module_config,
GpuCompiler::CompileOptions options, bool relocatable) {
if (ptx.empty()) {
return std::vector<uint8_t>();
}
se::GpuAsmOpts ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
if (relocatable) {
ptxas_config.extra_flags.push_back("-c");
}
uint64_t start_usecs = tsl::Env::Default()->NowMicros();
bool cancel_if_reg_spill =
hlo_module_config.debug_options()
.xla_gpu_filter_kernels_spilling_registers_on_autotuning() &&
options.is_autotuning_compilat | #include "xla/service/gpu/nvptx_compiler.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/backend.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/buffer_value.h"
#include "xla/service/gpu/gpu_constants.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/logical_buffer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
class NVPTXCompilerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<BufferAssignment>> AssignBuffers(
HloModule* module) {
constexpr uint64_t pointer_size = 4;
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
TF_RETURN_IF_ERROR(
ScheduleGpuModule(module, pointer_size, gpu_device_info).status());
auto buffer_size_bytes_function =
[](const BufferValue& buffer_value) -> int64_t {
return GetSizeOfShape(buffer_value.shape(), pointer_size);
};
return BufferAssigner::Run(
module, std::make_unique<SequentialHloOrdering>(module->schedule()),
buffer_size_bytes_function,
[](LogicalBuffer::Color) { return kXlaAllocatedBufferAlignBytes; });
}
};
class NVPTXCompilerTestTriton : public NVPTXCompilerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_F(NVPTXCompilerTest, AllReducePerformedInplace) {
const absl::string_view hlo_string = R"(
HloModule Module, input_output_alias={ {}: (0, {}, may-alias) }
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0)
ROOT allreduce = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get()));
HloInstruction* all_reduce = module->entry_computation()->root_instruction();
EXPECT_TRUE(buffer_assignment->SharesTopLevelSlice(all_reduce,
all_reduce->operand(0)));
}
TEST_F(NVPTXCompilerTest, AllReducePerformedInplaceTwoOperands) {
const absl::string_view hlo_string = R"(
HloModule Module,
input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0)
param1 = f32[128] parameter(1)
ROOT allreduce = (f32[128], f32[128]) all-reduce(param0, param1),
replica_groups={}, to_apply=summit
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get()));
HloInstruction* all_reduce = module->entry_computation()->root_instruction();
EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex(
all_reduce, {0}, all_reduce->operand(0), {}));
EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex(
all_reduce, {1}, all_reduce->operand(1), {}));
}
TEST_F(NVPTXCompilerTestTriton,
DotDimensionAreSortedBeforePaddingForCublasEnablingTritonFusion) {
const absl::string_view hlo_string = R"(
ENTRY e {
p0 = f16[11,22,33,44] parameter(0)
p1 = s8[11,22,33,44] parameter(1)
p1c = f16[11,22,33,44] convert(p1)
ROOT d = f16[11,22,44,44] dot(p0, p1c),
lhs_batch_dims={0,1}, lhs_contracting_dims={2},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
se::CudaComputeCapability cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (cc.IsAtLeastAmpere()) {
MatchOptimizedHlo(hlo_string, R"(
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: __triton_gemm
)");
} else {
MatchOptimizedHlo(hlo_string, R"(
; CHECK-NOT: triton
)");
}
}
TEST_F(NVPTXCompilerTest, RemovesUnnecessaryCopyInPostSchedulingPipelines) {
const absl::string_view hlo_text = R"(
HloModule all_gather_overlapping, is_scheduled=true
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
add = f32[1,128] add(splat_c0, param_0)
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
copy = f32[2,128] copy(all-gather-done)
cond = pred[] get-tuple-element(input_tuple), index=2
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, copy, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
copy_param_0 = f32[1,128] copy(param_0)
copy_param_1 = f32[2,128] copy(param_1)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(copy_param_0, copy_param_1, param_2)
while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
get-tuple-element = f32[1,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=0
get-tuple-element.1 = f32[2,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=1
get-tuple-element.2 = pred[] get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=2
copy.3 = pred[] copy(pred[] get-tuple-element.2)
ROOT tuple.2 = (f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) tuple(f32[1,128]{1,0} get-tuple-element, f32[2,128]{1,0} get-tuple-element.1, pred[] copy.3)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
EXPECT_EQ(CountCopies(*module), 4);
const HloInstruction* while_op = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
NVPTXCompiler compiler;
TF_EXPECT_OK(compiler.RunPostSchedulingPipelines(
      module.get(), /*scheduler_mem_limit=*/100000,
backend().default_stream_executor()->GetDeviceDescription()));
EXPECT_EQ(CountCopies(*module), 3);
while_op = hlo_query::GetFirstInstructionWithOpcode(
*module->entry_computation(), HloOpcode::kWhile);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
}
}
} | 2,060 |
#ifndef XLA_SERVICE_GPU_HORIZONTAL_INPUT_FUSION_H_
#define XLA_SERVICE_GPU_HORIZONTAL_INPUT_FUSION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuHorizontalInputFusion : public HloModulePass {
public:
explicit GpuHorizontalInputFusion(const se::DeviceDescription& d)
: device_info_(d) {}
absl::string_view name() const override {
return "gpu_horizontal_input_fusion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation*);
const se::DeviceDescription& device_info_;
};
}
}
#endif
#include "xla/service/gpu/horizontal_input_fusion.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
Shape GetInputShapeForMultiOutputFusion(const HloInstruction& instr) {
const HloInstruction* real_hero = GetRealHeroForMultiOutputFusion(instr);
if (real_hero->operands().empty()) {
return Shape();
} else {
return real_hero->operand(0)->shape();
}
}
class HorizontalInputFusionImpl {
public:
explicit HorizontalInputFusionImpl(HloComputation* computation,
const se::DeviceDescription& d)
: computation_(computation), device_info_(d) {}
~HorizontalInputFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
HloComputation* computation_;
const se::DeviceDescription& device_info_;
};
bool CompareShapeDimsFromLeftToRight(const Shape& shape_a,
const Shape& shape_b) {
if (shape_a.rank() != shape_b.rank()) {
return shape_a.rank() < shape_b.rank();
}
auto dims_a = shape_a.dimensions();
auto dims_b = shape_b.dimensions();
for (size_t i = 0; i < dims_a.size(); ++i) {
if (dims_a[i] != dims_b[i]) {
return dims_a[i] < dims_b[i];
}
}
  // Equal dimension sequences must compare as equivalent (return false) so
  // that this comparator is a strict weak ordering, as std::sort requires.
  return false;
}
std::vector<HloInstruction*> FindAndSortFusionCandidates(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusion_instr_set;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (IsInputFusibleReduction(*predecessor) &&
IsConsumerTheOnlyNonRootUser(*predecessor, *consumer)) {
if (fusion_instr_set.insert(predecessor).second) {
fusion_instrs.push_back(predecessor);
}
}
}
std::sort(fusion_instrs.begin(), fusion_instrs.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
Shape shape_a = GetInputShapeForMultiOutputFusion(*a);
Shape shape_b = GetInputShapeForMultiOutputFusion(*b);
if (!ShapeUtil::EqualIgnoringElementType(shape_a, shape_b)) {
return CompareShapeDimsFromLeftToRight(shape_a, shape_b);
}
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
});
return fusion_instrs;
}
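// Sorting candidates by input shape (and then by instruction count) places
// shape-compatible fusions next to each other, so the anchor-based loop in
// Run() below can merge maximal runs of candidates instead of stopping at the
// first incompatible neighbor.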
absl::StatusOr<bool> HorizontalInputFusionImpl::Run() {
bool changed = false;
XLA_VLOG_LINES(3, computation_->ToString());
std::vector<HloInstruction*> def_to_use_order =
computation_->MakeInstructionPostOrder();
for (HloInstruction* consumer : def_to_use_order) {
auto candidates = FindAndSortFusionCandidates(consumer);
if (candidates.size() <= 1) {
continue;
}
for (size_t j = 0; j < candidates.size(); ++j) {
if (candidates[j]->opcode() != HloOpcode::kFusion) {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(candidates[j],
HloInstruction::FusionKind::kInput));
candidates[j] = fusion_instr;
changed = true;
}
}
size_t fusion_anchor_id = 0;
for (size_t j = 1; j < candidates.size(); ++j) {
HloInstruction* fusion_anchor = candidates[fusion_anchor_id];
HloInstruction* fused = candidates[j];
if (ShapesCompatibleForMultiOutputFusion(*fusion_anchor, *fused) &&
FusionFitsInBudget(*fusion_anchor, *fused, device_info_)) {
VLOG(3) << "Fuse " << fused->ToString() << " into "
<< fusion_anchor->ToString();
fusion_anchor->MergeFusionInstructionIntoMultiOutput(fused);
changed = true;
} else {
VLOG(3) << j - fusion_anchor_id - 1 << " instructions are fused.";
fusion_anchor_id = j;
}
}
}
return changed;
}
}
absl::StatusOr<bool> GpuHorizontalInputFusion::RunOnComputation(
HloComputation* computation) {
HorizontalInputFusionImpl horizontal_fusion_impl(computation, device_info_);
return horizontal_fusion_impl.Run();
}
absl::StatusOr<bool> GpuHorizontalInputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(2) << "Run horizontal input fusion.";
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool computation_changed, RunOnComputation(comp));
    changed |= computation_changed;
}
return changed;
}
}
} | #include "xla/service/gpu/horizontal_input_fusion.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalInputFusionTest : public GpuCodegenTest {
public:
se::DeviceDescription device_description_{
TestGpuDeviceInfo::RTXA6000DeviceInfo()};
GpuHorizontalInputFusion horizontal_input_fusion_{device_description_};
};
TEST_F(HorizontalInputFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = f16[] fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = f16[] fusion(arg.2), kind=kInput, calls=fused_computation.2
ROOT tuple.1 = (f16[], f16[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(HorizontalInputFusionTest, ManyInputFusions) {
auto module = CreateNewVerifiedModule();
HloComputation* reduce_computation;
{
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
reduce_computation =
module->AddEmbeddedComputation(embedded_builder.Build());
}
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
auto input_shape = ShapeUtil::MakeShape(F32, {1024, 1024});
auto output_shape = ShapeUtil::MakeShape(F32, {1024});
for (int64_t i = 0; i < 130; ++i) {
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 2 + 0, input_shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 2 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
auto alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(input_shape, param_alpha, {}));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
input_shape, HloOpcode::kMultiply, param_var_in, alpha_broadcasted));
HloInstruction* const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
output_shape, mul, const0, {1}, reduce_computation));
var_outs.push_back(reduce);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
module->AddEntryComputation(builder.Build());
CompileAndVerifyIr(module->Clone(), R"(CHECK: reduce-group-6)",
                     /*match_optimized_ir=*/false);
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
TEST_F(HorizontalInputFusionTest, MultiOutputFusionTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MultiOutputFusionTest
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.2 {
arg.1 = f16[1024]{0} parameter(0)
constant0 = f16[] constant(0)
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
add.0 = f16[1024] add(arg.1, arg.1)
ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0)
}
fused_computation.3 {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
add.0 = f16[1024] add(arg.0, arg.1)
mul.0 = f16[1024] multiply(arg.0, arg.1)
ROOT tuple.1 = (f16[1024], f16[1024]) tuple(add.0, mul.0)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
fusion.1 = (f16[],f16[1024]) fusion(arg.1), kind=kInput, calls=fused_computation.1
fusion.2 = (f16[],f16[1024]) fusion(arg.2), kind=kInput, calls=fused_computation.2
gte.3 = f16[] get-tuple-element(fusion.1), index=0
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.2 = f16[1024]{0} get-tuple-element(fusion.2), index=1
gte.6 = f16[] get-tuple-element(fusion.2), index=0
fusion.3 = (f16[1024],f16[1024]) fusion(gte.1, gte.2),
kind=kLoop, calls=fused_computation.3
gte.4 = f16[1024] get-tuple-element(fusion.3), index=0
gte.5 = f16[1024]{0} get-tuple-element(fusion.3), index=1
ROOT tuple.1 = (f16[], f16[1024], f16[1024]{0}, f16[])
tuple(gte.3, gte.4, gte.5, gte.6)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
}
TEST_F(HorizontalInputFusionTest, NonfusionInstrs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
%add_f16 {
%x = f16[] parameter(0)
%y = f16[] parameter(1)
ROOT %add = f16[] add(%x, %y)
}
ENTRY entry_computation {
arg.0 = f16[1024]{0} parameter(0)
arg.1 = f16[1024]{0} parameter(1)
constant0 = f16[] constant(0)
reduce.0 = f16[] reduce(arg.0, constant0), dimensions={0}, to_apply=%add_f16
reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16
ROOT tuple.0 = (f16[], f16[]) tuple(reduce.0, reduce.1)
}
)")
.value();
EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))),
(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
}
}
} | 2,061 |
#ifndef XLA_SERVICE_GPU_AUTOTUNER_UTIL_H_
#define XLA_SERVICE_GPU_AUTOTUNER_UTIL_H_
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
struct DeviceConfig {
se::StreamExecutor* stream_exec;
se::DeviceMemoryAllocator* allocator = nullptr;
};
struct DevicelessConfig {
std::string model_str;
se::GpuComputeCapability gpu_compute_capability{
se::CudaComputeCapability{0, 0}};
};
class AutotuneCacheKey {
public:
AutotuneCacheKey(absl::string_view model_str,
const HloInstruction& instruction);
explicit AutotuneCacheKey(absl::string_view model_str,
absl::string_view hlo_canonical)
: model_str_(model_str), hlo_canonical_(hlo_canonical) {}
absl::string_view GetModelStr() const { return model_str_; }
absl::string_view GetHlo() const { return hlo_canonical_; }
template <typename H>
friend H AbslHashValue(H h, const AutotuneCacheKey& w) {
return H::combine(std::move(h), w.model_str_, w.hlo_canonical_);
}
bool operator==(const AutotuneCacheKey& w) const {
return model_str_ == w.model_str_ && hlo_canonical_ == w.hlo_canonical_;
}
std::string ToString() const {
return absl::StrFormat("<key model='%s', hlo='%s'>", model_str_,
hlo_canonical_);
}
private:
std::string model_str_;
std::string hlo_canonical_;
};
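// Illustrative usage (the model string and instruction are made-up names):
//
//   AutotuneCacheKey key("NVIDIA RTX A6000", *dot_instr);
//   key.GetHlo();  // canonicalized HLO text, stable across instruction ids
//
// Instructions that canonicalize to the same HLO string deliberately share a
// single cache entry.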
class AutotuneConfig {
public:
bool should_init_buffers() const { return autotune_level_ >= 2; }
bool should_reinit_output_buffer() const { return autotune_level_ >= 3; }
bool should_check_correctness() const { return autotune_level_ >= 4; }
bool should_crash_on_check_failure() const {
return should_crash_on_check_failure_;
}
bool should_require_complete_aot_autotune_results() const {
return require_complete_aot_autotune_results_;
}
const std::string& autotune_cache_dir() const { return autotune_cache_dir_; }
AutotuneConfig(const AutotuneConfig& right)
: config_(right.config_),
autotune_level_(right.autotune_level_),
should_crash_on_check_failure_(right.should_crash_on_check_failure_),
exhaustive_tiling_search_(right.exhaustive_tiling_search_),
require_complete_aot_autotune_results_(
right.require_complete_aot_autotune_results_),
autotune_cache_dir_(right.autotune_cache_dir_) {}
AutotuneConfig(const std::variant<DeviceConfig, DevicelessConfig>& config,
const DebugOptions& debug_options)
: config_(config),
autotune_level_(debug_options.xla_gpu_autotune_level()),
should_crash_on_check_failure_(
debug_options.xla_gpu_crash_on_verification_failures()),
exhaustive_tiling_search_(
debug_options.xla_gpu_exhaustive_tiling_search()),
require_complete_aot_autotune_results_(
debug_options.xla_gpu_require_complete_aot_autotune_results()),
autotune_cache_dir_(
debug_options.xla_gpu_per_fusion_autotune_cache_dir()) {}
absl::string_view GetModelStr() const {
if (auto deviceless_config = std::get_if<DevicelessConfig>(&config_)) {
return deviceless_config->model_str;
}
const auto& device_config = std::get<DeviceConfig>(config_);
return device_config.stream_exec->GetDeviceDescription().model_str();
}
se::StreamExecutor* GetExecutor() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
return std::get<DeviceConfig>(config_).stream_exec;
}
se::DeviceMemoryAllocator* GetAllocator() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
auto& cf = std::get<DeviceConfig>(config_);
if (cf.allocator != nullptr) {
return cf.allocator;
}
if (allocator_ == nullptr) {
allocator_ =
std::make_unique<se::StreamExecutorMemoryAllocator>(GetExecutor());
}
return allocator_.get();
}
absl::StatusOr<se::Stream*> GetStream() const {
CHECK(std::holds_alternative<DeviceConfig>(config_));
return GetAllocator()->GetStream(GetExecutor()->device_ordinal());
}
const se::GpuComputeCapability& GetGpuComputeCapability() const {
if (auto c = std::get_if<DeviceConfig>(&config_)) {
return c->stream_exec->GetDeviceDescription().gpu_compute_capability();
}
return std::get<DevicelessConfig>(config_).gpu_compute_capability;
}
bool IsDeviceless() const {
return std::holds_alternative<DevicelessConfig>(config_);
}
bool ExhaustiveTilingSearch() const { return exhaustive_tiling_search_; }
private:
std::variant<DeviceConfig, DevicelessConfig> config_;
int32_t autotune_level_;
bool should_crash_on_check_failure_;
bool exhaustive_tiling_search_;
bool require_complete_aot_autotune_results_;
mutable std::unique_ptr<se::DeviceMemoryAllocator> allocator_;
std::string autotune_cache_dir_;
};
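// Minimal construction sketch, assuming a live `stream_exec` and the module's
// DebugOptions `opts`:
//
//   AutotuneConfig config(DeviceConfig{stream_exec, /*allocator=*/nullptr},
//                         opts);
//
// For ahead-of-time compilation without a GPU, pass a DevicelessConfig
// instead; IsDeviceless() then selects the deviceless code paths.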
using AutotuneNoCacheFn = std::function<absl::StatusOr<AutotuneResult>()>;
struct AutotunerUtil {
static absl::StatusOr<se::DeviceMemoryBase> CreateBuffer(
se::RedzoneAllocator& allocator, const Shape& shape,
const AutotuneConfig& config, int64_t& rng_state);
static absl::StatusOr<AutotuneResult> Autotune(
const HloInstruction* instr, const AutotuneConfig& config,
const AutotuneNoCacheFn& autotune_fn);
static AutotuneCacheKey GetKey(const HloInstruction* instr,
const AutotuneConfig& config);
static absl::StatusOr<bool> IsInCache(const AutotuneCacheKey& key,
const AutotuneConfig& config);
static absl::StatusOr<bool> AddResult(const AutotuneCacheKey& key,
AutotuneResult result,
const AutotuneConfig& config);
static absl::StatusOr<se::RedzoneAllocator> CreateRedzoneAllocator(
const AutotuneConfig& config, const DebugOptions& opts);
static absl::StatusOr<std::string> SerializeAutotuneResults(
bool as_textproto = false);
static absl::Status SerializeAutotuneResults(AutotuneResults* results);
static absl::Status LoadAutotuneResults(absl::string_view data,
bool as_textproto = false);
static absl::Status LoadAutotuneResults(const AutotuneResults& results);
static absl::Status SerializeAutotuneResultsToFile(
absl::string_view file_path);
static absl::Status SerializeAutotuneResultsToFile(
const AutotuneResults& results, absl::string_view file_path);
static absl::Status LoadAutotuneResultsFromFile(absl::string_view file_path);
static void ClearAutotuneResults();
static bool ResultCacheIsEmpty();
};
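// Typical call shape (a sketch only; `PickBestAlgorithm` stands in for
// whatever measurement routine the caller owns):
//
//   TF_ASSIGN_OR_RETURN(
//       AutotuneResult best,
//       AutotunerUtil::Autotune(instr, config, [&] {
//         return PickBestAlgorithm(instr, config);
//       }));
//
// The callback runs only on a cache miss; hits are served from the in-memory
// map or, when a cache dir is configured, from the per-fusion file cache.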
absl::StatusOr<std::string> AutotuneResultsToString(
const AutotuneResults& results, bool as_textproto);
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s);
}
}
#endif
#include "xla/service/gpu/autotuner_util.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA256.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr int kVersion = 3;
}
using AutotuneCacheMap = absl::flat_hash_map<AutotuneCacheKey, AutotuneResult>;
static absl::Mutex autotune_cache_mu(absl::kConstInit);
static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) =
*new AutotuneCacheMap();
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s) {
llvm::SHA256 sha256;
sha256.update(llvm::StringRef(s));
std::array<uint8_t, 32> hash = sha256.final();
absl::string_view hash_view(reinterpret_cast<const char*>(hash.data()),
hash.size());
std::string base64_encoded_hash;
TF_RETURN_IF_ERROR(tsl::Base64Encode(hash_view, &base64_encoded_hash));
return base64_encoded_hash;
}
namespace {
absl::StatusOr<std::string> GetCacheFilePath(absl::string_view cache_dir,
const AutotuneCacheKey& key) {
if (cache_dir.empty()) {
return absl::InvalidArgumentError("autotune_cache_dir should not be empty");
}
TF_ASSIGN_OR_RETURN(std::string key_hash,
GetBase64EncodedSha256Hash(key.ToString()));
return tsl::io::JoinPath(cache_dir, absl::StrCat(key_hash, ".textproto"));
}
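// The resulting path has the form <cache_dir>/<base64(sha256(key))>.textproto;
// hashing the full key keeps file names bounded regardless of the size of the
// canonical HLO embedded in the key.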
struct ResultAndInserted {
AutotuneResult result;
bool inserted;
};
ResultAndInserted AddResultToInMemoryCache(const AutotuneCacheKey& key,
AutotuneResult result)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto [it, inserted] = autotune_cache.emplace(key, std::move(result));
return {it->second, inserted};
}
absl::Status AddResultToFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
AutotuneResult result,
std::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key));
VLOG(1) << "Writing autotune result to file: " << file_path;
std::string result_str;
if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) {
return absl::InternalError("Failed to serialize autotune result.");
}
std::string temp_file_path = tsl::io::GetTempFilename(".textproto");
tsl::Env* default_env = tsl::Env::Default();
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(default_env, temp_file_path, result_str));
return default_env->RenameFile(temp_file_path, file_path);
}
absl::StatusOr<ResultAndInserted> AddResultToCaches(const AutotuneCacheKey& key,
AutotuneResult result,
std::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
ResultAndInserted result_and_inserted = AddResultToInMemoryCache(key, result);
if (result_and_inserted.inserted) {
TF_RETURN_IF_ERROR(AddResultToFileBasedCacheIfEnabled(
key, result_and_inserted.result, cache_dir));
}
return result_and_inserted;
}
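// Write-through design note: the in-memory map is the per-process source of
// truth, and the file-based cache is appended to only on first insertion, so
// each key is written at most once per process; the rename-from-temp-file in
// AddResultToFileBasedCacheIfEnabled keeps each write atomic.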
std::optional<AutotuneResult> TryToFindInInMemoryCache(
const AutotuneCacheKey& key) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
absl::MutexLock lock(&autotune_cache_mu);
auto it = autotune_cache.find(key);
if (it == autotune_cache.end()) {
return std::nullopt;
}
return it->second;
}
absl::StatusOr<std::optional<AutotuneResult>>
TryToFindInFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return std::nullopt;
}
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key));
if (!tsl::Env::Default()->FileExists(file_path).ok()) {
VLOG(1) << "Autotune result file not found: " << file_path;
return std::nullopt;
}
VLOG(1) << "Autotune result file found: " << file_path;
std::string autotune_result_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), file_path,
&autotune_result_str));
AutotuneResult result;
if (!tsl::protobuf::TextFormat::ParseFromString(autotune_result_str,
&result)) {
return absl::InvalidArgumentError("Failed to parse autotune result.");
}
return result;
}
void SortAutotuneResults(AutotuneResults* results) {
std::sort(results->mutable_results()->pointer_begin(),
results->mutable_results()->pointer_end(),
[](const auto* a, const auto* b) {
return std::make_pair(absl::string_view(a->device()),
absl::string_view(a->hlo())) <
std::make_pair(absl::string_view(b->device()),
absl::string_view(b->hlo()));
});
}
}
absl::StatusOr<std::string> AutotuneResultsToString(
const AutotuneResults& results, bool as_textproto) {
if (as_textproto) {
std::string textproto;
if (tsl::protobuf::TextFormat::PrintToString(results, &textproto)) {
return textproto;
} else {
return Internal("Failed to serialize autotune results.");
}
}
return results.SerializeAsString();
}
namespace {
void SerializeAutotuneEntry(AutotuneResults* results, const AutotuneCacheKey& k,
const AutotuneResult* res) {
auto& entry = *results->add_results();
entry.set_device(std::string(k.GetModelStr()));
entry.set_hlo(std::string(k.GetHlo()));
*entry.mutable_result() = *res;
}
}
absl::Status AutotunerUtil::SerializeAutotuneResults(
AutotuneResults* results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const auto& [k, result] : autotune_cache) {
SerializeAutotuneEntry(results, k, &result);
}
results->set_version(kVersion);
SortAutotuneResults(results);
return absl::OkStatus();
}
absl::Status AutotunerUtil::LoadAutotuneResults(
const AutotuneResults& results) {
absl::MutexLock lock(&autotune_cache_mu);
for (const AutotuneResults::Entry& result : results.results()) {
if (auto [it, inserted] = autotune_cache.emplace(
AutotuneCacheKey(result.device(), result.hlo()), result.result());
!inserted) {
return absl::InternalError(absl::StrCat(
"Duplicate autotuning result for ", it->first.ToString()));
}
}
return absl::OkStatus();
}
void AutotunerUtil::ClearAutotuneResults() {
absl::MutexLock lock(&autotune_cache_mu);
autotune_cache.clear();
}
bool AutotunerUtil::ResultCacheIsEmpty() {
absl::MutexLock lock(&autotune_cache_mu);
return autotune_cache.empty();
}
absl::StatusOr<se::DeviceMemoryBase> AutotunerUtil::CreateBuffer(
se::RedzoneAllocator& allocator, const Shape& shape,
const AutotuneConfig& config, int64_t& rng_state) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase buffer,
allocator.AllocateBytes(ShapeUtil::ByteSizeOf(shape)));
if (config.should_init_buffers()) {
InitializeBuffer(allocator.stream(), shape.element_type(), &rng_state,
buffer);
}
return buffer;
}
namespace {
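// Builds the HLO fingerprint used in cache keys. For fusions, the key is the
// canonical print of the fused computation with IDs stripped and
// subcomputations elided, so structurally identical fusions from different
// modules map to the same autotuning entry.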
std::string ToCanonicalString(const HloInstruction* instr) {
auto options = HloPrintOptions::Canonical();
if (instr->opcode() != HloOpcode::kFusion) {
options.set_print_backend_config(true);
return instr->ToString(options);
}
options.set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff);
options.set_print_infeed_outfeed_config(false);
options.set_print_only_essential_constants(true);
options.set_print_operand_shape(true);
options.set_print_ids(false);
options.set_canonicalize_computations(true);
return instr->called_computations()[0]->ToString(options);
}
}
AutotuneCacheKey::AutotuneCacheKey(absl::string_view model_str,
const HloInstruction& instr)
: AutotuneCacheKey(model_str, ToCanonicalString(&instr)) {}
namespace {
absl::StatusOr<std::optional<AutotuneResult>> TryFindInCache(
const AutotuneCacheKey& key, absl::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
std::optional<AutotuneResult> opt_result = TryToFindInInMemoryCache(key);
if (opt_result.has_value()) {
    if (VLOG_IS_ON(2)) {
      LOG(INFO) << "In-memory autotune cache hit: key = " << key.ToString();
    } else if (VLOG_IS_ON(1)) {
      LOG(INFO) << "In-memory autotune cache hit";
    }
return opt_result;
}
TF_ASSIGN_OR_RETURN(opt_result,
TryToFindInFileBasedCacheIfEnabled(key, cache_dir));
if (opt_result.has_value()) {
AddResultToInMemoryCache(key, opt_result.value());
    if (VLOG_IS_ON(2)) {
      LOG(INFO) << "File-based autotune cache hit: key = " << key.ToString();
    } else if (VLOG_IS_ON(1)) {
      LOG(INFO) << "File-based autotune cache hit";
    }
return opt_result;
}
  if (VLOG_IS_ON(2)) {
    LOG(INFO) << "Autotune cache miss: key = " << key.ToString();
  } else if (VLOG_IS_ON(1)) {
    LOG(INFO) << "Autotune cache miss";
  }
return std::nullopt;
}
}
AutotuneCacheKey AutotunerUtil::GetKey(
const HloInstruction* instr, const AutotuneConfig& config) {
return AutotuneCacheKey(config.GetModelStr(), *instr);
}
absl::StatusOr<bool> AutotunerUtil::IsInCache(
const AutotuneCacheKey& key, const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
return opt_res.has_value();
}
absl::StatusOr<bool> AutotunerUtil::AddResult(
const AutotuneCacheKey& key, AutotuneResult result,
const AutotuneConfig& config) {
TF_ASSIGN_OR_RETURN(
ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(result), config.autotune_cache_dir()));
return result_and_inserted.inserted;
}
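// Cache-or-compute entry point: returns the cached result when one exists,
// otherwise runs `autotune_fn` and stores its result in both caches. A
// sketch of a typical call site (the measurement lambda is hypothetical):
//
//   TF_ASSIGN_OR_RETURN(
//       AutotuneResult best,
//       AutotunerUtil::Autotune(fusion_instr, config,
//                               [&] { return MeasureCandidates(); }));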
absl::StatusOr<AutotuneResult> AutotunerUtil::Autotune(
const HloInstruction* instr, const AutotuneConfig& config,
const AutotuneNoCacheFn& autotune_fn) {
const AutotuneCacheKey key = GetKey(instr, config);
TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
TryFindInCache(key, config.autotune_cache_dir()));
if (opt_res.has_value()) {
return opt_res.value();
}
if (config.should_require_complete_aot_autotune_results()) {
return NotFound(
"Complete XLA AOT autotuning results are required, but no AOT result "
"was found for key: %s",
key.ToString());
}
TF_ASSIGN_OR_RETURN(AutotuneResult autotune_result, autotune_fn());
TF_ASSIGN_OR_RETURN(ResultAndInserted result_and_inserted,
AddResultToCaches(key, std::move(autotune_result),
config.autotune_cache_dir()));
return result_and_inserted.result;
}
namespace {
bool IsTextProtoPath(absl::string_view file_path) {
return absl::EndsWith(file_path, ".txt") ||
absl::EndsWith(file_path, ".textproto") ||
absl::EndsWith(file_path, ".prototxt") ||
absl::EndsWith(file_path, ".pbtxt");
}
}
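// Deserialization is strict about versioning: results serialized under a
// different kVersion are rejected outright rather than migrated, so stale
// cache files fail fast instead of loading into the wrong schema.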
absl::Status AutotunerUtil::LoadAutotuneResults(
absl::string_view data, bool as_textproto) {
AutotuneResults results;
bool parse_success =
as_textproto ? tsl::protobuf::TextFormat::ParseFromString(
std::string(data), &results)
: results.ParseFromString(std::string(data));
if (!parse_success) {
return absl::InvalidArgumentError(
"Failed to parse autotune results string.");
}
if (results.version() != kVersion) {
return absl::InvalidArgumentError(absl::StrFormat(
"Version mismatch in autotune results. Expected %d but was %d",
kVersion, results.version()));
}
TF_RETURN_IF_ERROR(LoadAutotuneResults(results));
return absl::OkStatus();
}
absl::StatusOr<std::string> AutotunerUtil::SerializeAutotuneResults(
bool as_textproto) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return AutotuneResultsToString(results, as_textproto);
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
const AutotuneResults& results, absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
TF_RET_CHECK(results.version() > 0)
<< "Did you call SerializeAutotuneResults to get this AutotuneResults?";
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
TF_ASSIGN_OR_RETURN(
std::string autotune_results_str,
AutotuneResultsToString(results, IsTextProtoPath(resolved_path)));
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), resolved_path,
autotune_results_str));
LOG(INFO) << "Autotune results serialized to file: " << resolved_path;
return absl::OkStatus();
}
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
absl::string_view file_path) {
AutotuneResults results;
TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results));
return SerializeAutotuneResultsToFile(results, file_path);
}
absl::Status AutotunerUtil::LoadAutotuneResultsFromFile(
absl::string_view file_path) {
TF_RET_CHECK(!file_path.empty());
std::string resolved_path;
if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
return FailedPrecondition("File path can not be resolved: %s", file_path);
}
if (!tsl::Env::Default()->FileExists(resolved_path).ok()) {
return FailedPrecondition("Autotune results file does not exist: %s",
resolved_path);
}
std::string autotune_results_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), resolved_path,
&autotune_results_str));
TF_RETURN_IF_ERROR(LoadAutotuneResults(autotune_results_str,
IsTextProtoPath(resolved_path)));
LOG(INFO) << "Autotune results loaded from file: " << resolved_path;
return absl::OkStatus();
}
absl::StatusOr<se::RedzoneAllocator>
AutotunerUtil::CreateRedzoneAllocator(const AutotuneConfig& config,
const DebugOptions& opts) {
TF_ASSIGN_OR_RETURN(se::Stream * stream, config.GetStream());
return se::RedzoneAllocator(
stream, config.GetAllocator(), PtxOptsFromDebugOptions(opts),
std::numeric_limits<int64_t>::max(),
config.should_check_correctness()
? opts.xla_gpu_redzone_padding_bytes()
: 0);
}
}
}
#include "xla/service/gpu/autotuner_util.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::tsl::testing::StatusIs;
class AutotunerUtilTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
static constexpr absl::string_view kResultText = R"(
version: 3
results {
device: "sm_8.0 with 42331013120B RAM, 108 cores, 1410000KHz clock, 1215000KHz mem clock, 41943040B L2$"
hlo: "{\n tmp_0 = f16[1,16,17,3]{3,2,1,0} parameter(0)\n tmp_1 = f16[16,51]{1,0} bitcast(f16[1,16,17,3]{3,2,1,0} tmp_0)\n tmp_2 = s8[16,17,3]{2,1,0} parameter(1)\n tmp_3 = s8[51,16]{0,1} bitcast(s8[16,17,3]{2,1,0} tmp_2)\n tmp_4 = f16[51,16]{0,1} convert(s8[51,16]{0,1} tmp_3)\n tmp_5 = f16[16,16]{1,0} dot(f16[16,51]{1,0} tmp_1, f16[51,16]{0,1} tmp_4), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tmp_6 = f16[1,16,16]{2,1,0} bitcast(f16[16,16]{1,0} tmp_5)\n}"
result {
run_time {
nanos: 31744
}
triton {
block_m: 32
block_n: 32
block_k: 32
split_k: 1
num_stages: 1
num_warps: 4
num_ctas: 1
}
}
})";
void SetUp() override { AutotunerUtil::ClearAutotuneResults(); }
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
static std::unique_ptr<stream_executor::StreamExecutor> NewStreamExecutor() {
stream_executor::Platform* platform =
stream_executor::PlatformManager::PlatformWithName("Host").value();
stream_executor::StreamExecutorConfig config(0);
return platform->GetUncachedExecutor(config).value();
}
absl::Status PopulateResultCache() {
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_RETURN_IF_ERROR(AutotunerUtil::LoadAutotuneResults(kResultText, true));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
return absl::OkStatus();
}
};
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
EXPECT_GT(results.results_size(), 0);
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
AutotuneResults results;
EXPECT_TRUE(results.ParseFromString(autotune_results_str));
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto1) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto2) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".textproto");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_Protobuf) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
AutotunerUtil::ClearAutotuneResults();
EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
TEST_F(AutotunerUtilTest, ResultConflictsAreDetected) {
TF_EXPECT_OK(PopulateResultCache());
std::string kFilePath = GetUniqueTempFilePath(".pb");
TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
EXPECT_THAT(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Duplicate autotuning result")));
}
TEST_F(AutotunerUtilTest, FailIfRequireCompleteAotAutotuning) {
std::string kFilePath = GetUniqueTempFilePath(".txt");
auto hlo_module = GetOptimizedModule(kHloText);
TF_EXPECT_OK(hlo_module.status());
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
std::unique_ptr<stream_executor::StreamExecutor> executor =
NewStreamExecutor();
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor.get()}, options);
EXPECT_THAT(
AutotunerUtil::Autotune(instruction, config,
[&] { return AutotuneResult(); }),
StatusIs(
absl::StatusCode::kNotFound,
HasSubstr("Complete XLA AOT autotuning results are required, but "
"no AOT result was found for key: <key model")));
}
TEST_F(AutotunerUtilTest, OkIfJitAutotuningDisabledButAlreadyLoadedAOT) {
auto hlo_module = GetOptimizedModule(kHloText);
std::vector<HloComputation*> computations =
(*hlo_module)
->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
EXPECT_THAT(computations, Not(IsEmpty()));
const HloInstruction* instruction = *computations[0]->instructions().begin();
std::unique_ptr<stream_executor::StreamExecutor> executor =
NewStreamExecutor();
{
AutotuneConfig config(DeviceConfig{executor.get()}, DebugOptions());
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
}
auto options = DebugOptions();
options.set_xla_gpu_require_complete_aot_autotune_results(true);
AutotuneConfig config(DeviceConfig{executor.get()}, options);
TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
return AutotuneResult();
}).status());
}
class FileBasedCacheTest : public AutotunerUtilTest {
public:
  static std::string ToString(const tsl::protobuf::Message& message) {
std::string textproto;
CHECK(tsl::protobuf::TextFormat::PrintToString(message, &textproto));
return textproto;
}
static std::vector<std::string> GetFilesInDir(
const absl::string_view cache_dir) {
std::vector<std::string> files_in_cache;
TF_CHECK_OK(tsl::Env::Default()->GetChildren(std::string(cache_dir),
&files_in_cache));
return files_in_cache;
}
static std::string Read(const absl::string_view filepath) {
std::string file_content;
TF_CHECK_OK(tsl::ReadFileToString(tsl::Env::Default(),
std::string(filepath), &file_content));
return file_content;
}
static void Write(const absl::string_view filepath,
const absl::string_view content) {
TF_CHECK_OK(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(filepath), content));
}
std::unique_ptr<stream_executor::StreamExecutor> executor_ =
NewStreamExecutor();
std::unique_ptr<HloModule> module_ =
ParseAndReturnVerifiedModule(kHloText).value();
const HloInstruction* dot_ = hlo_query::GetFirstInstructionWithOpcode(
*module_->entry_computation(), HloOpcode::kDot);
std::string cache_dir_ = [] {
tsl::Env* default_env = tsl::Env::Default();
std::string cache_dir;
CHECK(default_env->LocalTempFilename(&cache_dir));
CHECK_OK(default_env->CreateDir(cache_dir));
return cache_dir;
}();
AutotuneConfig config_ = AutotuneConfig(DeviceConfig{executor_.get()}, [&] {
DebugOptions options;
options.set_xla_gpu_per_fusion_autotune_cache_dir(cache_dir_);
return options;
}());
AutotuneCacheKey cache_key_ = AutotunerUtil::GetKey(dot_, config_);
std::string cache_filename_ = [&] {
absl::StatusOr<std::string> key_hash =
GetBase64EncodedSha256Hash(cache_key_.ToString());
CHECK_OK(key_hash.status());
return absl::StrCat(key_hash.value(), ".textproto");
}();
std::string cache_file_path_ = tsl::io::JoinPath(cache_dir_, cache_filename_);
const AutotuneResult result1_ = [] {
AutotuneResult result;
result.set_scratch_bytes(1);
return result;
}();
const AutotuneResult result2_ = [] {
AutotuneResult result;
result.set_scratch_bytes(2);
return result;
}();
};
TEST_F(FileBasedCacheTest, AutotuneWritesResultToTheCacheDir) {
TF_ASSERT_OK_AND_ASSIGN(
AutotuneResult result,
AutotunerUtil::Autotune(dot_, config_, [&] { return result1_; }));
EXPECT_EQ(ToString(result), ToString(result1_));
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
TEST_F(FileBasedCacheTest, AutotuneReadsResultFromTheCacheDir) {
Write(cache_file_path_, ToString(result1_));
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(dot_, config_, [&] {
cache_hit = false;
return result2_;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(ToString(result), ToString(result1_));
}
TEST_F(FileBasedCacheTest,
RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain) {
auto check_autotune_cache_hit = [](const HloInstruction* instr,
const AutotuneConfig& config,
const AutotuneResult& expected_result) {
bool cache_hit = true;
TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
AutotunerUtil::Autotune(instr, config, [&] {
cache_hit = false;
AutotuneResult new_result;
new_result.set_scratch_bytes(2);
return new_result;
}));
EXPECT_TRUE(cache_hit);
EXPECT_EQ(ToString(result), ToString(expected_result));
};
Write(cache_file_path_, ToString(result1_));
check_autotune_cache_hit(dot_, config_, result1_);
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path_, kPlaceholderContent);
check_autotune_cache_hit(dot_, config_, result1_);
EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
TEST_F(FileBasedCacheTest,
IsInCacheReturnsTrueIfTheResultIsInTheFileBasedCache) {
Write(cache_file_path_, ToString(result1_));
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(cache_key_, config_));
EXPECT_TRUE(is_in_cache);
}
TEST_F(FileBasedCacheTest, IsInCacheReturnsFalseIfTheResultIsNotInEitherCache) {
TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
AutotunerUtil::IsInCache(cache_key_, config_));
EXPECT_FALSE(is_in_cache);
}
TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) {
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_TRUE(added);
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
TEST_F(FileBasedCacheTest, RepeatedAddResultDoesNotWriteTheFileAgain) {
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_TRUE(added);
}
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
constexpr absl::string_view kPlaceholderContent = "placeholder content";
Write(cache_file_path_, kPlaceholderContent);
{
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_FALSE(added);
}
EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
}
}
} | 2,062 |
#ifndef XLA_SERVICE_GPU_BUFFER_COMPARATOR_H_
#define XLA_SERVICE_GPU_BUFFER_COMPARATOR_H_
#include "absl/status/statusor.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace xla::gpu {
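// Compares two same-sized device buffers of the shape's element type using a
// relative-error tolerance (0.1 by default). A minimal usage sketch,
// assuming a live stream and two device allocations of matching size:
//
//   BufferComparator comparator(ShapeUtil::MakeShape(F32, {n}), config);
//   TF_ASSIGN_OR_RETURN(bool equal,
//                       comparator.CompareEqual(stream, got, expected));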
class BufferComparator {
public:
BufferComparator(const BufferComparator&) = delete;
BufferComparator(BufferComparator&&) = default;
BufferComparator(const Shape& shape, const HloModuleConfig& config,
double tolerance = 0.1);
absl::StatusOr<bool> CompareEqual(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const;
private:
template <typename ElementT, typename ComparisonT>
absl::StatusOr<bool> CompareEqualParameterized(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected,
std::string_view kernel_name,
void* kernel_symbol) const;
template <typename ElementType, typename ComparisonType>
absl::StatusOr<bool> HostCompare(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const;
template <typename ElementT>
absl::StatusOr<bool> DeviceCompare(se::Stream* stream,
se::DeviceMemoryBase current,
se::DeviceMemoryBase expected,
std::string_view kernel_name,
void* kernel_symbol) const;
Shape shape_;
HloModuleConfig config_;
double tolerance_;
};
namespace buffer_comparator {
void* fp8_e4m3fn_comparison();
void* fp8_e5m2_comparison();
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200
void* fp8_e4m3fnuz_comparison();
void* fp8_e5m2fnuz_comparison();
#endif
void* fp16_comparison();
void* bf16_comparison();
void* fp32_comparison();
void* fp64_comparison();
void* int8_comparison();
void* int32_comparison();
}
}
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string_view>
#include <type_traits>
#include <vector>
#include "Eigen/Core"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
template <typename ElementT>
using ComparisonKernelT =
se::TypedKernel<se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>,
float, uint64_t, se::DeviceMemory<uint64_t>>;
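// Launches the comparison kernel behind `kernel_symbol` over both buffers
// and reads back a single uint64_t mismatch counter; the kernel bumps the
// counter for every out-of-tolerance element, so a zero read-back means the
// buffers compared equal.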
template <typename ElementT>
absl::StatusOr<bool> BufferComparator::DeviceCompare(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected, std::string_view kernel_name,
void* kernel_symbol) const {
se::StreamExecutor* executor = stream->parent();
se::DeviceMemoryHandle out_param(executor,
executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(stream->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
if (current.size() != expected.size()) {
return Internal("Mismatched buffer size: %d bytes vs. %d bytes",
current.size(), expected.size());
}
se::DeviceMemory<ElementT> current_typed(current);
se::DeviceMemory<ElementT> expected_typed(expected);
uint64_t buffer_size = current_typed.ElementCount();
TF_ASSIGN_OR_RETURN(
ComparisonKernelT<ElementT> comparison_kernel,
(se::TypedKernelFactory<
se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>, float,
uint64_t, se::DeviceMemory<uint64_t>>::Create(executor, kernel_name,
kernel_symbol)));
const se::DeviceDescription& gpu_device_info =
executor->GetDeviceDescription();
LaunchDimensions dim = CalculateLaunchDimensions(shape_, gpu_device_info);
se::DeviceMemory<uint64_t> as_uint64(out_param.memory());
TF_RETURN_IF_ERROR(stream->ThenLaunch(
dim.thread_counts_per_block(), dim.block_counts(), comparison_kernel,
current_typed, expected_typed, static_cast<float>(tolerance_),
buffer_size, as_uint64));
uint64_t result = -1;
CHECK_EQ(out_param.memory().size(), sizeof(result));
TF_RETURN_IF_ERROR(
stream->Memcpy(&result, out_param.memory(), sizeof(result)));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
return result == 0;
}
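// Host-side cross-check of the device comparison. An element pair mismatches
// when
//   |a - b| / (max(|a|, |b|) + 1) >= tolerance,
// with NaN considered equal to NaN, matching infinities considered equal,
// and fp16 values clamped to [-65505, 65505] first; only the first ten
// differences are logged.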
template <typename ElementType, typename ComparisonType>
absl::StatusOr<bool> BufferComparator::HostCompare(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const {
int64_t n = current.size() / sizeof(ElementType);
std::vector<ElementType> host_current(n), host_expected(n);
TF_RETURN_IF_ERROR(
stream->Memcpy(host_current.data(), current, current.size()));
TF_RETURN_IF_ERROR(
stream->Memcpy(host_expected.data(), expected, expected.size()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
const auto canonicalize = [](ComparisonType a) -> ComparisonType {
if (std::is_same<ElementType, Eigen::half>::value && a) {
constexpr ComparisonType kMaxFp16Value = 65505;
if (std::isnan(a)) {
return a;
}
return std::max(-kMaxFp16Value, std::min(a, kMaxFp16Value));
}
return a;
};
int differences_seen = 0;
for (int64_t i = 0; i < n && differences_seen < 10; ++i) {
auto current_value = static_cast<ComparisonType>(host_current[i]);
auto expected_value = static_cast<ComparisonType>(host_expected[i]);
ComparisonType current_value_canonical = canonicalize(current_value);
ComparisonType expected_value_canonical = canonicalize(expected_value);
if (std::isnan(current_value_canonical) &&
std::isnan(expected_value_canonical)) {
continue;
}
if (std::isinf(current_value_canonical) &&
std::isinf(expected_value_canonical) &&
current_value_canonical == expected_value_canonical) {
continue;
}
if (std::isfinite(current_value_canonical) !=
std::isfinite(expected_value_canonical) ||
!(std::abs(current_value_canonical - expected_value_canonical) /
(std::max(std::abs(current_value_canonical),
std::abs(expected_value_canonical)) +
1) <
tolerance_)) {
++differences_seen;
LOG(ERROR) << "Difference at " << i << ": " << current_value
<< ", expected " << expected_value;
}
}
return differences_seen == 0;
}
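// The device comparison is authoritative: the host pass only runs after a
// device-side mismatch, and the CHECK below asserts both paths reached the
// same verdict.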
template <typename ElementT, typename ComparisonT>
absl::StatusOr<bool> BufferComparator::CompareEqualParameterized(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected, std::string_view kernel_name,
void* kernel_symbol) const {
XLA_SCOPED_LOGGING_TIMER("BufferComparator::CompareEqual");
TF_ASSIGN_OR_RETURN(bool result,
DeviceCompare<ElementT>(stream, current, expected,
kernel_name, kernel_symbol));
if (result) {
return true;
}
TF_ASSIGN_OR_RETURN(bool host_return, (HostCompare<ElementT, ComparisonT>(
stream, current, expected)));
CHECK_EQ(host_return, result)
<< "Host comparison succeeded even though GPU comparison failed.";
return false;
}
absl::StatusOr<bool> BufferComparator::CompareEqual(
se::Stream* stream, se::DeviceMemoryBase current,
se::DeviceMemoryBase expected) const {
switch (shape_.element_type()) {
#if GOOGLE_CUDA
case xla::F8E4M3FN:
return CompareEqualParameterized<tsl::float8_e4m3fn, float>(
stream, current, expected, "fp8_e4m3fn_comparison",
buffer_comparator::fp8_e4m3fn_comparison());
case xla::F8E5M2:
return CompareEqualParameterized<tsl::float8_e5m2, float>(
stream, current, expected, "fp8_e5m2_comparison",
buffer_comparator::fp8_e5m2_comparison());
#endif
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200
case xla::F8E4M3FNUZ:
return CompareEqualParameterized<tsl::float8_e4m3fnuz, float>(
stream, current, expected, "fp8_e4m3fnuz_comparison",
buffer_comparator::fp8_e4m3fnuz_comparison());
case xla::F8E5M2FNUZ:
return CompareEqualParameterized<tsl::float8_e5m2fnuz, float>(
stream, current, expected, "fp8_e5m2fnuz_comparison",
buffer_comparator::fp8_e5m2fnuz_comparison());
#endif
case xla::F16:
return CompareEqualParameterized<Eigen::half, float>(
stream, current, expected, "fp16_comparison",
buffer_comparator::fp16_comparison());
case xla::BF16:
return CompareEqualParameterized<Eigen::bfloat16, float>(
stream, current, expected, "bf16_comparison",
buffer_comparator::bf16_comparison());
case xla::F32:
return CompareEqualParameterized<float, float>(
stream, current, expected, "fp32_comparison",
buffer_comparator::fp32_comparison());
case xla::F64:
return CompareEqualParameterized<double, double>(
stream, current, expected, "fp64_comparison",
buffer_comparator::fp64_comparison());
case xla::S8:
return CompareEqualParameterized<int8_t, float>(
stream, current, expected, "int8_comparison",
buffer_comparator::int8_comparison());
case xla::S32:
return CompareEqualParameterized<int32_t, float>(
stream, current, expected, "int32_comparison",
buffer_comparator::int32_comparison());
default:
return Unimplemented("Unimplemented element type");
}
}
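// Complex types are compared element-wise over their scalar halves: a
// C64/C128 shape is rewritten to F32/F64 with the leading dimension doubled,
// which keeps the buffer's byte count unchanged.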
BufferComparator::BufferComparator(const Shape& shape,
const HloModuleConfig& config,
double tolerance)
: shape_(shape), config_(config), tolerance_(tolerance) {
auto double_dim_size = [&]() {
int64_t prev_zero_dim_size = shape_.dimensions(0);
shape_.set_dimensions(0, prev_zero_dim_size * 2);
};
if (shape_.element_type() == PrimitiveType::C64) {
shape_.set_element_type(PrimitiveType::F32);
double_dim_size();
} else if (shape_.element_type() == PrimitiveType::C128) {
shape_.set_element_type(PrimitiveType::F64);
double_dim_size();
}
}
}
}
#include "xla/service/gpu/buffer_comparator.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <limits>
#include <vector>
#include "xla/primitive_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/types.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
constexpr double kDefaultTolerance = 0.1;
class BufferComparatorTest : public testing::Test {
protected:
BufferComparatorTest()
#if GOOGLE_CUDA
: platform_(se::PlatformManager::PlatformWithName("CUDA").value()),
#elif TENSORFLOW_USE_ROCM
: platform_(se::PlatformManager::PlatformWithName("ROCM").value()),
#endif
stream_exec_(platform_->ExecutorForDevice(0).value()) {
}
template <typename ElementType>
bool CompareEqualBuffers(const std::vector<ElementType>& current,
const std::vector<ElementType>& expected,
double tolerance) {
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle current_buffer(
stream_exec_, stream_exec_->AllocateArray<ElementType>(current.size()));
se::DeviceMemoryHandle expected_buffer(
stream_exec_,
stream_exec_->AllocateArray<ElementType>(expected.size()));
TF_CHECK_OK(stream->Memcpy(current_buffer.memory_ptr(), current.data(),
current_buffer.memory().size()));
TF_CHECK_OK(stream->Memcpy(expected_buffer.memory_ptr(), expected.data(),
expected_buffer.memory().size()));
TF_CHECK_OK(stream->BlockHostUntilDone());
BufferComparator comparator(
ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<ElementType>(),
{static_cast<int64_t>(current.size())}),
HloModuleConfig(), tolerance);
return comparator
.CompareEqual(stream.get(), current_buffer.memory(),
expected_buffer.memory())
.value();
}
template <typename ElementType>
bool CompareEqualFloatBuffers(const std::vector<float>& lhs_float,
const std::vector<float>& rhs_float,
double tolerance = kDefaultTolerance) {
std::vector<ElementType> lhs(lhs_float.begin(), lhs_float.end());
std::vector<ElementType> rhs(rhs_float.begin(), rhs_float.end());
return CompareEqualBuffers(lhs, rhs, tolerance);
}
template <typename ElementType>
bool CompareEqualComplex(const std::vector<std::complex<ElementType>>& lhs,
const std::vector<std::complex<ElementType>>& rhs) {
return CompareEqualBuffers<std::complex<ElementType>>(lhs, rhs,
kDefaultTolerance);
}
se::Platform* platform_;
se::StreamExecutor* stream_exec_;
};
TEST_F(BufferComparatorTest, TestComplex) {
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_TRUE(CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_TRUE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 3}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));
EXPECT_FALSE(
CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 6}, {2, 3}}));
EXPECT_TRUE(CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}},
{{0.1, 0.2}, {2.2, 3.3}}));
EXPECT_FALSE(
CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 7}}));
}
TEST_F(BufferComparatorTest, TestNaNs) {
EXPECT_TRUE(
CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")},
{std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({std::nanf("")}, {1.}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("")}));
EXPECT_TRUE(
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("1234")}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({std::nanf("")}, {1.}));
}
TEST_F(BufferComparatorTest, TestInfs) {
const auto inf = std::numeric_limits<float>::infinity();
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {65504}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {65504}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-20}));
#if GOOGLE_CUDA
EXPECT_TRUE(
CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {inf}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-448}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-20}));
EXPECT_FALSE(
CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {std::nanf("")}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-inf}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-57344}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {20}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-20}));
#endif
}
TEST_F(BufferComparatorTest, TestNumbers) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<float>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<double>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<double>({10}, {9}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {101}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {120}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({90}, {120}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({0}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({90}, {100}));
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {90}));
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({-128}, {127}));
#if GOOGLE_CUDA
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {26.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {20.1}));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {30.0}, 0.2));
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0.9}, {1}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({11}, {12}));
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({12}, {11}));
#endif
}
TEST_F(BufferComparatorTest, TestMultiple) {
{
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<float>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<float>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<double>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<double>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({20, 30, 40, 50, 60},
{21, 31, 41, 51, 61}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#if GOOGLE_CUDA
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
{
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(
{20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));
std::vector<float> lhs(200);
std::vector<float> rhs(200);
for (int i = 0; i < 200; i++) {
EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the same at index " << i;
lhs[i] = 3;
rhs[i] = 5;
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs))
<< "should be the different at index " << i;
lhs[i] = 0;
rhs[i] = 0;
}
}
#endif
}
TEST_F(BufferComparatorTest, BF16) {
const int element_count = 3123;
int64_t rng_state = 0;
auto stream = stream_exec_->CreateStream().value();
se::DeviceMemoryHandle lhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, lhs.memory());
se::DeviceMemoryHandle rhs(
stream_exec_,
stream_exec_->AllocateArray<Eigen::bfloat16>(element_count));
InitializeBuffer(stream.get(), BF16, &rng_state, rhs.memory());
BufferComparator comparator(ShapeUtil::MakeShape(BF16, {element_count}),
HloModuleConfig());
EXPECT_FALSE(comparator.CompareEqual(stream.get(), lhs.memory(), rhs.memory())
.value());
}
}
}
} | 2,063 |
#ifndef XLA_SERVICE_GPU_GPU_ASYNC_COLLECTIVE_ANNOTATOR_H_
#define XLA_SERVICE_GPU_GPU_ASYNC_COLLECTIVE_ANNOTATOR_H_
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
class GpuAsyncCollectiveAnnotator : public HloModulePass {
public:
explicit GpuAsyncCollectiveAnnotator(HloPredicate is_collective_async)
: is_collective_async_(std::move(is_collective_async)) {}
absl::string_view name() const override {
return "gpu-async-collective-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloPredicate is_collective_async_;
};
}
}
#endif
#include "xla/service/gpu/gpu_async_collective_annotator.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
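// Tags every asynchronous collective start op with is_sync =
// !is_collective_async_(op); the predicate therefore names the ops allowed
// to stay asynchronous, and everything else is marked for synchronous
// completion by the backend.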
absl::StatusOr<bool> GpuAsyncCollectiveAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (!hlo_query::IsAsyncCollectiveStartOp(instruction)) {
continue;
}
CollectiveBackendConfig config;
config.set_is_sync(!is_collective_async_(instruction));
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
instruction->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = config;
TF_RETURN_IF_ERROR(instruction->set_backend_config(gpu_config));
changed = true;
}
}
return changed;
}
}
}
#include "xla/service/gpu/gpu_async_collective_annotator.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
addf32 {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
addf16 {
p0 = f16[] parameter(0)
p1 = f16[] parameter(1)
ROOT add = f16[] add(p0, p1)
}
reduce_scatterf32 {
p0 = f32[2] parameter(0)
ROOT result = f32[1] reduce-scatter(p0), replica_groups={},
dimensions={0}, to_apply=addf32
}
ENTRY entry {
pf32 = f32[1] parameter(0)
pf16 = f16[1] parameter(1)
arf32-start = f32[1] all-reduce-start(pf32), to_apply=addf32
arf32-done = f32[1] all-reduce-done(arf32-start)
arf16-start = f16[1] all-reduce-start(pf16), to_apply=addf16
arf16-done = f16[1] all-reduce-done(arf16-start)
agf32-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}
agf32-done = f32[2] all-gather-done(agf32-start)
agf16-start = (f16[1], f16[2]) all-gather-start(pf16), dimensions={0}
agf16-done = f16[2] all-gather-done(agf16-start)
cpf32-start = (f32[1], f32[1], u32[], u32[]) collective-permute-start(pf32),
source_target_pairs={{0,1}, {1,0}}
cpf32-done = f32[1] collective-permute-done(cpf32-start)
cpf16-start = (f16[1], f16[1], u32[], u32[]) collective-permute-start(pf16),
source_target_pairs={{0,1}, {1,0}}
cpf16-done = f16[1] collective-permute-done(cpf16-start)
rsf32-start = ((f32[2]), f32[1]) async-start(agf32-done), calls=reduce_scatterf32
rsf32-done = f32[1] async-done(rsf32-start), calls=reduce_scatterf32
ROOT tuple = (f32[1], f16[1], f32[2], f16[2], f32[1], f16[1], f32[1])
tuple(arf32-done, arf16-done, agf32-done, agf16-done, cpf32-done,
cpf16-done, rsf32-done)
}
)";
struct TestCase {
std::string test_name;
HloPredicate is_async_predicate;
absl::flat_hash_set<absl::string_view> expected_async;
absl::flat_hash_set<absl::string_view> expected_sync;
};
class GpuAsyncCollectiveAnnotatorTest
: public HloTestBase,
public ::testing::WithParamInterface<TestCase> {};
XLA_TEST_P(GpuAsyncCollectiveAnnotatorTest, Test) {
const TestCase& test_case = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString, 2));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, GpuAsyncCollectiveAnnotator(test_case.is_async_predicate)
.Run(module.get()));
EXPECT_TRUE(changed);
for (const HloInstruction* hlo :
module->entry_computation()->instructions()) {
if (!hlo_query::IsAsyncCollectiveStartOp(hlo)) {
continue;
}
auto gpu_config = hlo->backend_config<GpuBackendConfig>();
ASSERT_TRUE(gpu_config.ok());
const CollectiveBackendConfig& backend_config =
gpu_config.value().collective_backend_config();
if (test_case.expected_async.contains(hlo->name())) {
EXPECT_FALSE(backend_config.is_sync());
}
if (test_case.expected_sync.contains(hlo->name())) {
EXPECT_TRUE(backend_config.is_sync());
}
}
}
std::vector<TestCase> TestCases() {
HloPredicate is_f16 = [](const HloInstruction* hlo) {
return hlo->operand(0)->shape().element_type() == PrimitiveType::F16;
};
return {
{"all_async",
HloPredicateTrue,
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"cpf32-start", "cpf16-start", "rsf32-start"},
{}},
{"all_sync",
HloPredicateFalse,
{},
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"cpf32-start", "cpf16-start", "rsf32-start"}},
{"ar_async",
HloPredicateIsOp<HloOpcode::kAllReduceStart>,
{"arf32-start", "arf16-start"},
{"agf32-start", "agf16-start", "cpf32-start", "cpf16-start",
"rsf32-start"}},
{"cp_async",
HloPredicateIsOp<HloOpcode::kCollectivePermuteStart>,
{"cpf32-start", "cpf16-start"},
{"arf32-start", "arf16-start", "agf32-start", "agf16-start",
"rsf32-start"}},
{"f16_async",
is_f16,
{"arf16-start", "agf16-start", "cpf16-start"},
{"arf32-start", "agf32-start", "cpf32-start", "rsf32-start"}},
};
}
std::string TestCaseName(const ::testing::TestParamInfo<TestCase>& test_case) {
return test_case.param.test_name;
}
INSTANTIATE_TEST_SUITE_P(GpuAsyncCollectiveAnnotatorTest,
GpuAsyncCollectiveAnnotatorTest,
::testing::ValuesIn(TestCases()), TestCaseName);
}
}
} | 2,064 |
#ifndef XLA_SERVICE_GPU_BUFFER_ALLOCATIONS_H_
#define XLA_SERVICE_GPU_BUFFER_ALLOCATIONS_H_
#include <cstddef>
#include <set>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
namespace xla {
namespace gpu {
class BufferAllocations {
public:
BufferAllocations(absl::Span<se::DeviceMemoryBase const> buffers,
int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: buffers_(buffers.begin(), buffers.end()),
device_ordinal_(device_ordinal),
memory_allocator_(memory_allocator) {}
BufferAllocations(BufferAllocations&& other) = default;
BufferAllocations& operator=(BufferAllocations&& other) = default;
BufferAllocations(const BufferAllocations&) = delete;
BufferAllocations& operator=(const BufferAllocations&) = delete;
se::DeviceMemoryAllocator* memory_allocator() const {
return memory_allocator_;
}
int device_ordinal() const { return device_ordinal_; }
se::DeviceMemoryBase GetDeviceAddress(
BufferAllocation::Index buffer_index) const;
se::DeviceMemoryBase& GetMutableDeviceAddress(
BufferAllocation::Index buffer_index);
se::DeviceMemoryBase GetDeviceAddress(
const BufferAllocation::Slice& buffer_slice) const;
absl::Status TearDown(const std::set<se::DeviceMemoryBase>& live_addresses,
absl::Span<const BufferAllocation> allocations);
std::string ToString() const {
std::string out;
for (BufferAllocation::Index i = 0; i < buffers_.size(); ++i) {
const auto& buf = buffers_[i];
absl::StrAppendFormat(&out, "Buffer %d -> %p (%d B)", i, buf.opaque(),
buf.size());
}
return out;
}
size_t size() const { return buffers_.size(); }
private:
std::vector<se::DeviceMemoryBase> buffers_;
int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
};
}
}
#endif
#include "xla/service/gpu/buffer_allocations.h"
#include <cstdint>
#include <set>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::Status BufferAllocations::TearDown(
const std::set<se::DeviceMemoryBase>& live_addresses,
absl::Span<const BufferAllocation> allocations) {
absl::Status status;
const int64_t num_buffers = allocations.size();
for (BufferAllocation::Index i = 0; i < num_buffers; ++i) {
const BufferAllocation& allocation = allocations[i];
se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index());
if ((allocation.maybe_live_out() &&
!live_addresses.count(buffer_address)) ||
allocation.IsPreallocatedTempBuffer()) {
auto dealloc_result =
memory_allocator_->Deallocate(device_ordinal_, buffer_address);
if (!dealloc_result.ok() && status.ok()) {
status = dealloc_result;
}
}
}
return status;
}
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
BufferAllocation::Index buffer_index) const {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
se::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress(
BufferAllocation::Index buffer_index) {
CHECK_GE(buffer_index, 0);
CHECK_LT(buffer_index, buffers_.size());
return buffers_[buffer_index];
}
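// Resolves a slice to a sub-span of its allocation's base address. Both the
// slice offset and its extent (offset + size) are CHECK-ed against the base
// buffer's size, so an out-of-range slice aborts instead of silently
// aliasing a neighboring allocation.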
se::DeviceMemoryBase BufferAllocations::GetDeviceAddress(
const BufferAllocation::Slice& buffer_slice) const {
int64_t index = buffer_slice.index();
se::DeviceMemoryBase base = GetDeviceAddress(index);
int64_t offset = buffer_slice.offset();
  CHECK_LE(buffer_slice.offset(), base.size())
      << "slice offset " << offset << " must not exceed buffer #" << index
      << " size " << base.size();
  int64_t extent = offset + buffer_slice.size();
  CHECK_LE(extent, base.size())
      << "slice extent " << extent << " must not exceed buffer #" << index
      << " size " << base.size();
return base.GetByteSlice(buffer_slice.offset(), buffer_slice.size());
}
}
}
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include <cstddef>
#include <vector>
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(BufferAllocationsTest, GetDeviceAddress) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc(0, size_in_bytes, 0);
BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),
sizeof(float));
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem,
allocations.GetDeviceAddress(0));
EXPECT_EQ(alloc_mem.opaque(), &data[0]);
TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem,
allocations.GetDeviceAddress(slice));
EXPECT_EQ(slice_mem.opaque(), &data[2]);
}
}
} | 2,065 |
#ifndef XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#define XLA_SERVICE_GPU_STREAM_EXECUTOR_UTIL_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string_view>
#include <tuple>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/layout.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec);
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version = se::dnn::VersionInfo{0, 0, 0});
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
se::dnn::DataLayout input,
se::dnn::FilterLayout filter,
se::dnn::DataLayout output);
absl::StatusOr<
std::tuple<se::dnn::DataLayout, se::dnn::FilterLayout, se::dnn::DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output);
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output);
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec);
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes = 0);
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream);
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream);
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer);
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind);
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind);
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind);
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type);
absl::StatusOr<AutotuneResult> PickBestResult(
absl::Span<AutotuneResult const> profile_results,
std::optional<std::string_view> instr_str,
HloModuleConfig hlo_module_config);
bool RequireDeterminism(const HloModuleConfig& config);
}
}
#endif
#include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec) {
if (!stream_exec) {
return absl::InvalidArgumentError("StreamExecutor is null");
}
stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn();
if (!dnn) {
return absl::FailedPreconditionError(
"DNN library initialization failed. Look at the errors above for more "
"details.");
}
return dnn->GetVersion();
}
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version) {
return GetDnnVersionInfo(stream_exec).value_or(fallback_version);
}
namespace {
using se::dnn::DataLayout;
using se::dnn::DataLayoutString;
using se::dnn::FilterLayout;
using se::dnn::FilterLayoutString;
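// Returns the smallest non-negative integer that does not occur in `vals`;
// used below to pick the logical dimension for the packed vector dimension of
// vectorized layouts.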
int64_t FindMissingDnum(absl::Span<const int64_t> vals) {
for (int i = 0; i < vals.size(); i++) {
if (!absl::c_linear_search(vals, i)) {
return i;
}
}
return vals.size();
}
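// Maps a cuDNN-style DataLayout onto an XLA layout (in major-to-minor order)
// using the convolution's dimension numbers. Vectorized layouts
// (kBatchDepthYX4/kBatchDepthYX32) get an extra trailing dimension for the
// vector.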
absl::StatusOr<Layout> DataLayoutToXlaLayout(
DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension,
absl::Span<int64_t const> spatial_dimensions) {
std::vector<int64_t> layout;
switch (data_layout) {
case DataLayout::kBatchDepthYX:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
break;
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(FindMissingDnum(layout));
break;
case DataLayout::kBatchYXDepth:
layout.push_back(batch_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(feature_dimension);
break;
default:
return Internal("Invalid layout %s", DataLayoutString(data_layout));
}
return LayoutUtil::MakeLayoutFromMajorToMinor(layout);
}
}
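// Converts the (input, filter, output) layouts used by StreamExecutor's DNN
// interface into the equivalent XLA layouts for the given convolution.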
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
DataLayout input, FilterLayout filter,
DataLayout output) {
TF_ASSIGN_OR_RETURN(
Layout input_layout,
DataLayoutToXlaLayout(input, dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()));
  TF_ASSIGN_OR_RETURN(
      Layout output_layout,
      DataLayoutToXlaLayout(output, dnums.output_batch_dimension(),
                            dnums.output_feature_dimension(),
                            dnums.output_spatial_dimensions()));
std::vector<int64_t> filter_layout;
switch (filter) {
case FilterLayout::kOutputInputYX:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
break;
case FilterLayout::kOutputInputYX4:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(FindMissingDnum(filter_layout));
break;
case FilterLayout::kOutputYXInput:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
break;
default:
return Internal("Invalid filter layout %s for conv with dnums %s,",
FilterLayoutString(filter),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout,
LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout),
output_layout);
}
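// The inverse of the above: infers which StreamExecutor layouts the given XLA
// shapes correspond to by comparing them against the candidate NCHW,
// vectorized NCHW, and NHWC layouts.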
absl::StatusOr<std::tuple<DataLayout, FilterLayout, DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
CHECK(input.has_layout());
CHECK(filter.has_layout());
CHECK(output.has_layout());
Layout nchw_input, nchw_filter, nchw_output;
std::tie(nchw_input, nchw_filter, nchw_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX,
FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX)
.value();
Layout nchw_vect_input, nchw_vect_filter, nchw_vect_output;
std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4,
FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4)
.value();
Layout nhwc_input, nhwc_filter, nhwc_output;
std::tie(nhwc_input, nhwc_filter, nhwc_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth,
FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth)
.value();
DataLayout input_layout;
if (LayoutUtil::Equal(input.layout(), nchw_input)) {
input_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) {
int64_t vect_size = input.dimensions(input.layout().minor_to_major(0));
if (vect_size == 4) {
input_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
input_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid input shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(input),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(input.layout(), nhwc_input)) {
input_layout = DataLayout::kBatchYXDepth;
} else {
return Internal(
"Invalid input layout %s for conv with dnums %s; expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(input.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(),
nchw_vect_input.ToString(), nhwc_input.ToString());
}
FilterLayout filter_layout;
if (LayoutUtil::Equal(filter.layout(), nchw_filter)) {
filter_layout = FilterLayout::kOutputInputYX;
} else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) {
int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0));
if (vect_size == 4) {
filter_layout = FilterLayout::kOutputInputYX4;
} else if (vect_size == 32) {
filter_layout = FilterLayout::kOutputInputYX32;
} else {
return Internal(
"Invalid filter shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(filter),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) {
filter_layout = FilterLayout::kOutputYXInput;
} else {
return Internal(
"Invalid filter layout %s for conv with dnums %s, expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(filter.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(),
nchw_vect_filter.ToString(), nhwc_filter.ToString());
}
DataLayout output_layout;
if (LayoutUtil::Equal(output.layout(), nchw_output)) {
output_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) {
int64_t vect_size = output.dimensions(output.layout().minor_to_major(0));
if (vect_size == 4) {
output_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
output_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid output shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(output),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(output.layout(), nhwc_output)) {
output_layout = DataLayout::kBatchYXDepth;
} else {
return Internal("Invalid output layout %s for conv with dnums %s",
LayoutUtil::HumanString(output.layout()),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout, filter_layout, output_layout);
}
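// Returns the one dimension of a conv operand that is neither the batch or
// feature dimension nor a spatial dimension, i.e. the vectorized feature
// dimension, if any.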
static std::optional<int64_t> FindVectorizedDim(int64_t rank, int64_t d0,
int64_t d1,
absl::Span<const int64_t> ds) {
for (int64_t i = 0; i < rank; i++) {
if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) {
continue;
}
return i;
}
return std::nullopt;
}
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
return {
FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
FindVectorizedDim(filter.dimensions_size(),
dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
FindVectorizedDim(
output.dimensions_size(), dnums.output_batch_dimension(),
dnums.output_feature_dimension(), dnums.output_spatial_dimensions()),
};
}
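// Returns a process-wide mutex that is unique per (platform, device ordinal)
// pair; the map is lazily created on first use and intentionally leaked.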
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) {
static absl::Mutex mu(absl::kConstInit);
static auto* mutexes =
new std::map<std::pair<const se::Platform*, int64_t>,
absl::Mutex>();
absl::MutexLock global_lock(&mu);
auto it = mutexes
->emplace(std::piecewise_construct,
std::make_tuple(stream_exec->GetPlatform(),
stream_exec->device_ordinal()),
std::make_tuple())
.first;
return it->second;
}
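// Loads a kernel from PTX (and, if available, precompiled CUBIN) and records
// its shared memory requirement in the kernel metadata.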
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes) {
se::MultiKernelLoaderSpec loader_spec(num_args);
loader_spec.AddCudaPtxInMemory(ptx, kernel_name);
if (!cubin_data.empty()) {
loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name);
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
se::KernelFactory::Create(stream_exec, loader_spec));
se::KernelMetadata m;
m.set_shared_memory_bytes(shared_mem_bytes);
kernel->set_metadata(m);
return kernel;
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
kernel, *kernel_args);
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
cluster_dim, kernel, *kernel_args);
}
// Integral types are intentionally unsupported; InitializeTypedBuffer draws
// floating-point samples and converts them instead.
template <typename T, typename Generator>
static typename std::enable_if<std::is_integral<T>::value, T>::type
UniformDistribution(T lhs, T rhs, Generator* gen) = delete;
template <typename T, typename Generator>
static typename std::enable_if<std::is_floating_point<T>::value, T>::type
UniformDistribution(T lhs, T rhs, Generator* gen) {
  return std::uniform_real_distribution<T>(lhs, rhs)(*gen);
}
namespace repeat_buffer_kernel {
void* kernel();
}
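// Fills `buffer` with pseudo-random values of type T. A fixed-size host
// buffer of random data is generated once per type and copied in cyclically;
// `rng_state` is the rotating start offset so that repeated calls produce
// different data.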
template <typename T>
static void InitializeTypedBuffer(se::Stream* stream,
se::DeviceMemoryBase buffer,
int64_t* rng_state) {
constexpr int host_buffer_size = 10069;
static std::vector<T>* host_buffer = [] {
auto* ret = new std::vector<T>(host_buffer_size);
std::mt19937 gen;
for (auto& element : *ret) {
constexpr bool kIsIntegral = std::numeric_limits<T>::is_integer;
constexpr bool kIsLowRange =
!kIsIntegral && std::numeric_limits<T>::max_exponent <=
std::numeric_limits<Eigen::half>::max_exponent;
using RandomType = typename std::conditional<std::is_same_v<T, double>,
double, float>::type;
auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0);
auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen);
element = T(kIsIntegral ? rand_val + 0.5 : rand_val);
}
return ret;
}();
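  // Copy the host data in at most two chunks: from the current offset to the
  // end of the host buffer, then wrapping around to its beginning.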
CHECK_EQ(0, buffer.size() % sizeof(T));
int64_t elements_to_fill = buffer.size() / sizeof(T);
int64_t host_index = *rng_state;
CHECK_LT(host_index, host_buffer_size);
*rng_state = (*rng_state + elements_to_fill) % host_buffer_size;
int64_t first_size =
std::min<int64_t>(host_buffer_size - host_index, elements_to_fill);
TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index,
first_size * sizeof(T)));
elements_to_fill -= first_size;
if (elements_to_fill == 0) {
return;
}
int64_t second_size = std::min<int64_t>(host_index, elements_to_fill);
CHECK_LE(first_size + second_size, host_buffer_size);
se::DeviceMemoryBase mem =
buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T));
TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size()));
elements_to_fill -= second_size;
if (elements_to_fill == 0) {
return;
}
#ifdef GOOGLE_CUDA
CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size);
se::StreamExecutor* executor = stream->parent();
auto kernel =
se::TypedKernelFactory<se::DeviceMemoryBase, int64_t, int64_t>::Create(
executor, "RepeatBufferKernel", repeat_buffer_kernel::kernel());
if (!kernel.ok()) {
LOG(FATAL) << "Could not create RepeatBufferKernel: " << kernel.status();
}
constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T);
constexpr int threads_per_block = 256;
constexpr int blocks_per_grid =
(host_buffer_bytes + threads_per_block - 1) / threads_per_block;
TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1),
se::BlockDim(blocks_per_grid, 1, 1), *kernel,
buffer, host_buffer_bytes,
static_cast<int64_t>(buffer.size())));
#endif
}
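// Dispatches on the primitive type and fills the buffer with random data of
// the corresponding native type (complex types use their component type,
// PRED uses int8_t).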
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer) {
return primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant) ||
primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<NativeT>(stream, buffer, rng_state);
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<typename NativeT::value_type>(
stream, buffer, rng_state);
}
if constexpr (primitive_type_constant == PRED) {
return InitializeTypedBuffer<int8_t>(stream, buffer, rng_state);
}
LOG(FATAL) << "Unexpected type: "
<< primitive_util::LowercasePrimitiveTypeName(buffer_type);
},
buffer_type);
}
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind) {
switch (kind) {
case CudnnConvKind::kBackwardFilter:
return se::dnn::BACKWARD_FILTER;
case CudnnConvKind::kBackwardInput:
return se::dnn::BACKWARD_DATA;
case CudnnConvKind::kForward:
return se::dnn::FORWARD;
case CudnnConvKind::kForwardActivation:
return se::dnn::FORWARD_BIAS_ACTIVATION;
case CudnnConvKind::kForwardGraph:
return se::dnn::FORWARD_GRAPH;
default:
break;
}
return Internal("Unexpected convolution kind");
}
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind) {
switch (kind) {
case CudnnNormKind::kLayerForwardInfer:
return se::dnn::LAYER_FWD_INFER;
case CudnnNormKind::kLayerForwardTrain:
return se::dnn::LAYER_FWD_TRAIN;
case CudnnNormKind::kLayerBackward:
return se::dnn::LAYER_BWD;
default:
return Internal("Unexpected norm kind");
}
}
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind) {
switch (kind) {
case CudnnfMHAMaskKind::kNoMask:
return se::dnn::NO_MASK;
case CudnnfMHAMaskKind::kPadding:
return se::dnn::PADDING;
case CudnnfMHAMaskKind::kCausal:
return se::dnn::CAUSAL;
case CudnnfMHAMaskKind::kPaddingCausal:
return se::dnn::PADDING_CAUSAL;
case CudnnfMHAMaskKind::kAlibi:
return se::dnn::ALIBI;
default:
return Internal("Unexpected fmha mask kind");
}
}
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type) {
switch (type) {
case F16:
return se::dnn::ToDataType<Eigen::half>::value;
case F32:
return se::dnn::ToDataType<float>::value;
case F64:
return se::dnn::ToDataType<double>::value;
case S8:
return se::dnn::ToDataType<int8_t>::value;
case S32:
return se::dnn::ToDataType<int32_t>::value;
case BF16:
return se::dnn::ToDataType<Eigen::bfloat16>::value;
case F8E4M3FN:
return se::dnn::ToDataType<tsl::float8_e4m3fn>::value;
case F8E5M2:
return se::dnn::ToDataType<tsl::float8_e5m2>::value;
default:
break;
}
return Internal("Unsupported datatype");
}
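// Determinism is required if either the TF_CUDNN_DETERMINISTIC environment
// variable (read once and cached) or the module's
// xla_gpu_deterministic_ops debug option is set.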
bool RequireDeterminism(const HloModuleConfig& config) {
static bool require_cudnn_determinism = [] {
bool cudnn_deterministic = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_CUDNN_DETERMINISTIC",
false,
&cudnn_deterministic));
return cudnn_deterministic;
}();
return require_cudnn_determinism ||
config.debug_options().xla_gpu_deterministic_ops();
}
namespace {
std::vector<AutotuneResult> KeepNonFailures(
absl::Span<AutotuneResult const> profile_results) { | #include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "xla/autotuning.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tsl/util/proto/proto_utils.h"
namespace xla::gpu {
namespace {
struct Result {
int64_t run_time_ns;
int64_t scratch_bytes;
bool operator==(const Result& other) const {
return other.run_time_ns == run_time_ns &&
other.scratch_bytes == scratch_bytes;
};
explicit operator AutotuneResult() const {
AutotuneResult result;
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns));
result.set_scratch_bytes(scratch_bytes);
return result;
}
};
static Result ATRToResult(AutotuneResult atr) {
return Result{.run_time_ns = absl::ToInt64Nanoseconds(
tsl::proto_utils::FromDurationProto(atr.run_time())),
.scratch_bytes = atr.scratch_bytes()};
}
std::vector<AutotuneResult> Results(const std::vector<Result>& stats) {
std::vector<AutotuneResult> results;
for (const auto& s : stats) results.push_back(AutotuneResult(s));
return results;
}
TEST(StreamExecutorTest, PickBestResult) {
absl::StatusOr<AutotuneResult> atr;
atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0}));
atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0}));
}
}
} | 2,066 |
#ifndef XLA_SERVICE_GPU_AUTOTUNER_COMPILE_UTIL_H_
#define XLA_SERVICE_GPU_AUTOTUNER_COMPILE_UTIL_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
class AutotunerCompileUtil {
public:
using GenerateModuleFn =
absl::AnyInvocable<absl::StatusOr<std::unique_ptr<HloModule>>(
const DebugOptions&)>;
static absl::StatusOr<std::optional<AutotunerCompileUtil>> Create(
const AutotuneConfig& config, const DebugOptions& opts);
struct ProfilingOutput {
ProfilingOutput(absl::Duration duration, ScopedShapedBuffer&& buffer)
: duration(duration), output(std::move(buffer)) {}
absl::Duration duration;
ScopedShapedBuffer output;
};
absl::StatusOr<std::optional<ProfilingOutput>> ProfileExecutable(
Executable* executable, se::Stream* stream,
absl::Span<se::DeviceMemoryBase const> input_buffers,
absl::Span<Shape const> input_shapes);
absl::StatusOr<std::unique_ptr<Executable>> Compile(
GenerateModuleFn extractor);
absl::StatusOr<std::unique_ptr<HloModule>> ExtractModule(
GenerateModuleFn extractor);
private:
AutotunerCompileUtil(const AutotuneConfig& config, Compiler* compiler,
se::StreamExecutor& stream_executor, se::Stream& stream,
se::DeviceMemoryAllocator& allocator,
const DebugOptions& opts);
absl::StatusOr<ExecutionOutput> Execute(Executable& executable,
std::vector<ExecutionInput> arguments,
ExecutionProfile* profile = nullptr);
AutotuneConfig config_;
Compiler* compiler_;
se::StreamExecutor& stream_executor_;
se::Stream& stream_;
se::DeviceMemoryAllocator& allocator_;
DebugOptions opts_;
};
class RedzoneBuffers {
public:
enum BuffersToCreate {
kAllInputs = 0,
kAllInputsAllOutputs = 1,
kAllInputsOutputsNoScratch = 2,
};
static absl::StatusOr<RedzoneBuffers> FromInstruction(
const HloInstruction& instruction, const AutotuneConfig& config,
const DebugOptions& debug_options, BuffersToCreate buffers_to_create);
const std::vector<se::DeviceMemoryBase>& input_buffers() const {
return input_buffers_;
}
const std::vector<Shape>& input_shapes() const { return input_shapes_; }
const std::vector<se::DeviceMemoryBase>& output_buffers() const {
return output_buffers_;
}
const Shape& output_shape() const { return output_shape_; }
se::RedzoneAllocator& RedzoneAllocator() const { return *redzone_allocator_; }
private:
absl::Status CreateInputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
int64_t& rng_state);
absl::Status CreateOutputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
BuffersToCreate buffers_to_create,
int64_t& rng_state);
std::unique_ptr<se::RedzoneAllocator> redzone_allocator_;
std::vector<se::DeviceMemoryBase> input_buffers_;
std::vector<Shape> input_shapes_;
std::vector<se::DeviceMemoryBase> output_buffers_;
Shape output_shape_;
};
}
}
#endif
#include "xla/service/gpu/autotuner_compile_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
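// Wraps raw device buffers as unowned ExecutionInputs with the given shapes.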
std::vector<ExecutionInput> ExecutionInputsFromBuffers(
absl::Span<se::DeviceMemoryBase const> buffers,
absl::Span<Shape const> shapes) {
CHECK_EQ(buffers.size(), shapes.size());
std::vector<ExecutionInput> inputs;
for (int i = 0; i < buffers.size(); ++i) {
inputs.emplace_back(shapes.at(i));
inputs.back().SetUnownedBuffer(
{}, MaybeOwningDeviceMemory(buffers.at(i)));
}
return inputs;
}
}
AutotunerCompileUtil::AutotunerCompileUtil(const AutotuneConfig& config,
Compiler* compiler,
se::StreamExecutor& stream_executor,
se::Stream& stream,
se::DeviceMemoryAllocator& allocator,
const DebugOptions& opts)
: config_(config),
compiler_(compiler),
stream_executor_(stream_executor),
stream_(stream),
allocator_(allocator),
opts_(opts) {
opts_.set_xla_enable_dumping(false);
opts_.set_xla_gpu_dump_autotune_results_to("");
opts_.set_xla_gpu_load_autotune_results_from("");
opts_.set_xla_gpu_dump_llvmir(false);
opts_.set_xla_gpu_dump_autotune_logs_to("");
opts_.set_xla_gpu_force_compilation_parallelism(1);
opts_.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
opts_.clear_xla_gpu_enable_command_buffer();
opts_.set_xla_embed_ir_in_executable(false);
opts_.set_xla_gpu_kernel_cache_file("");
}
absl::StatusOr<std::optional<AutotunerCompileUtil::ProfilingOutput>>
AutotunerCompileUtil::ProfileExecutable(
Executable* executable, se::Stream* stream,
absl::Span<se::DeviceMemoryBase const> input_buffers,
absl::Span<Shape const> input_shapes) {
{
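    // Warm-up run: triggers lazy initialization and warms caches so that the
    // timed run below measures steady-state execution; its output and timing
    // are discarded. ResourceExhausted is treated as "no result" rather than
    // an error so autotuning can skip the candidate.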
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
absl::StatusOr<ExecutionOutput> execution_output =
Execute(*executable, std::move(execution_inputs));
if (!execution_output.ok()) {
if (execution_output.status().code() ==
absl::StatusCode::kResourceExhausted) {
return {std::nullopt};
}
return execution_output.status();
}
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
}
std::vector<ExecutionInput> execution_inputs =
ExecutionInputsFromBuffers(input_buffers, input_shapes);
ExecutionProfile profile;
profile.set_warmup_run_executed(true);
TF_ASSIGN_OR_RETURN(
ExecutionOutput execution_output,
Execute(*executable, std::move(execution_inputs), &profile));
return std::make_optional<ProfilingOutput>(
absl::Nanoseconds(profile.compute_time_ns()),
execution_output.Commit().ConsumeResult());
}
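// Compiles the module produced by `extractor`. Returns a null Executable
// (not an error) if the fusion is known to be uncompilable or compilation
// ran out of resources, so that callers can skip the candidate.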
absl::StatusOr<std::unique_ptr<Executable>> AutotunerCompileUtil::Compile(
GenerateModuleFn extractor) {
absl::StatusOr<std::unique_ptr<HloModule>> new_hlo_module = extractor(opts_);
if (new_hlo_module.status().GetPayload(kUncompilableFusion).has_value()) {
return std::unique_ptr<Executable>();
} else if (!new_hlo_module.status().ok()) {
return new_hlo_module.status();
}
absl::StatusOr<std::unique_ptr<Executable>> out = compiler_->RunBackend(
std::move(*new_hlo_module), &stream_executor_,
Compiler::CompileOptions{&allocator_, nullptr,
{},
true});
if (out.status().code() == absl::StatusCode::kResourceExhausted ||
out.status().code() == absl::StatusCode::kCancelled) {
return std::unique_ptr<Executable>();
}
return out;
}
absl::StatusOr<std::unique_ptr<HloModule>> AutotunerCompileUtil::ExtractModule(
GenerateModuleFn extractor) {
return extractor(opts_);
}
absl::StatusOr<std::optional<AutotunerCompileUtil>>
AutotunerCompileUtil::Create(const AutotuneConfig& config,
const DebugOptions& opts) {
if (config.IsDeviceless()) {
return std::nullopt;
}
se::StreamExecutor* stream_exec = config.GetExecutor();
se::DeviceMemoryAllocator* allocator = config.GetAllocator();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config.GetStream());
TF_ASSIGN_OR_RETURN(Compiler * compiler,
Compiler::GetForPlatform(stream_exec->GetPlatform()));
return AutotunerCompileUtil(config, compiler, *stream_exec, *stream,
*allocator, opts);
}
absl::StatusOr<ExecutionOutput> AutotunerCompileUtil::Execute(
Executable& executable, std::vector<ExecutionInput> arguments,
ExecutionProfile* profile) {
GpuExecutableRunOptions gpu_opts;
gpu_opts.set_requires_exclusive_lock_on_gpu();
ExecutableRunOptions run_options;
run_options.set_device_ordinal(stream_executor_.device_ordinal());
run_options.set_stream(&stream_);
run_options.set_allocator(&allocator_);
run_options.set_gpu_executable_run_options(&gpu_opts);
run_options.set_execution_profile(profile);
ServiceExecutableRunOptions service_run_options(run_options);
TF_ASSIGN_OR_RETURN(ExecutionOutput output,
executable.ExecuteAsyncOnStreamWrapper(
&service_run_options, std::move(arguments)));
return std::move(output);
}
absl::StatusOr<RedzoneBuffers> RedzoneBuffers::FromInstruction(
const HloInstruction& instruction, const AutotuneConfig& config,
const DebugOptions& debug_options, BuffersToCreate buffers_to_create) {
RedzoneBuffers buffers;
TF_ASSIGN_OR_RETURN(auto rz_allocator, AutotunerUtil::CreateRedzoneAllocator(
config, debug_options));
buffers.redzone_allocator_ =
std::make_unique<se::RedzoneAllocator>(std::move(rz_allocator));
int64_t rng_state = 0;
TF_RETURN_IF_ERROR(
buffers.CreateInputs(instruction, config, debug_options, rng_state));
if (buffers_to_create == BuffersToCreate::kAllInputsAllOutputs ||
buffers_to_create == BuffersToCreate::kAllInputsOutputsNoScratch) {
TF_RETURN_IF_ERROR(buffers.CreateOutputs(instruction, config, debug_options,
buffers_to_create, rng_state));
}
return buffers;
}
absl::Status RedzoneBuffers::CreateInputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
int64_t& rng_state) {
for (const auto* operand : instruction.operands()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, operand->shape(),
config, rng_state));
input_buffers_.push_back(buf);
input_shapes_.push_back(operand->shape());
}
return absl::OkStatus();
}
absl::Status RedzoneBuffers::CreateOutputs(const HloInstruction& instruction,
const AutotuneConfig& config,
const DebugOptions& debug_options,
BuffersToCreate buffers_to_create,
int64_t& rng_state) {
if (!instruction.shape().IsTuple()) {
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, instruction.shape(),
config, rng_state));
output_buffers_.push_back(buf);
output_shape_ = instruction.shape();
return absl::OkStatus();
}
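  // For tuple-shaped outputs, optionally drop the last tuple element, which
  // by convention is the scratch buffer, when kAllInputsOutputsNoScratch is
  // requested.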
auto current_shape_it = instruction.shape().tuple_shapes().begin();
auto end = instruction.shape().tuple_shapes().end();
end -= buffers_to_create == kAllInputsAllOutputs ? 0 : 1;
  output_shape_ = std::distance(current_shape_it, end) == 1
                      ? *current_shape_it
                      : ShapeUtil::MakeTupleShape(
                            std::vector<Shape>{current_shape_it, end});
for (; current_shape_it < end; current_shape_it++) {
if (current_shape_it->IsTuple()) {
return Unimplemented("Nested tuples are unsupported by RedzoneBuffers.");
}
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase buf,
AutotunerUtil::CreateBuffer(*redzone_allocator_, *current_shape_it,
config, rng_state));
output_buffers_.push_back(buf);
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/autotuner_compile_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using AutotunerCompileUtilTest = HloTestBase;
TEST_F(AutotunerCompileUtilTest, VerifyOutputNotATuple) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = f32[1,2,3] custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_EQ(rzb3.output_shape(), root.shape());
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleOneElement) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 1);
EXPECT_FALSE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape().tuple_shapes(0));
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 0);
}
TEST_F(AutotunerCompileUtilTest, VerifyOutputTupleTwoElements) {
constexpr absl::string_view kHlo = R"(
HloModule hlo
ENTRY main {
p0 = f32[2,2] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[6,6] parameter(2)
ROOT root = (f32[1,2,3], u8[1,2]) custom-call(p0, p1, p2), custom_call_target="fake"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
AutotuneConfig autotune_config{DeviceConfig{executors.at(0), nullptr},
GetDebugOptionsForTest()};
auto& root = *module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputs));
EXPECT_EQ(rzb.input_shapes().size(), 3);
EXPECT_EQ(rzb.input_buffers().size(), 3);
EXPECT_EQ(rzb.output_buffers().size(), 0);
EXPECT_NE(rzb.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb2,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsAllOutputs));
EXPECT_EQ(rzb2.input_shapes().size(), 3);
EXPECT_EQ(rzb2.input_buffers().size(), 3);
EXPECT_EQ(rzb2.output_buffers().size(), 2);
EXPECT_TRUE(rzb2.output_shape().IsTuple());
EXPECT_EQ(rzb2.output_shape(), root.shape());
TF_ASSERT_OK_AND_ASSIGN(RedzoneBuffers rzb3,
RedzoneBuffers::FromInstruction(
root, autotune_config, GetDebugOptionsForTest(),
RedzoneBuffers::kAllInputsOutputsNoScratch));
EXPECT_EQ(rzb3.input_shapes().size(), 3);
EXPECT_EQ(rzb3.input_buffers().size(), 3);
EXPECT_EQ(rzb3.output_buffers().size(), 1);
EXPECT_FALSE(rzb3.output_shape().IsTuple());
EXPECT_EQ(rzb3.output_shape(), root.shape().tuple_shapes(0));
}
}
} | 2,067 |
#ifndef XLA_SERVICE_GPU_FUSION_MERGER_H_
#define XLA_SERVICE_GPU_FUSION_MERGER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class FusionMerger : public HloModulePass {
public:
explicit FusionMerger(const se::DeviceDescription& d,
HloCostAnalysis::ShapeSizeFunction f)
: gpu_device_info_(d), shape_size_function_(f) {}
absl::string_view name() const override { return "fusion_merger"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::DeviceDescription gpu_device_info_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
};
}
}
#endif
#include "xla/service/gpu/fusion_merger.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
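// Attempts to merge each loop-fusion instruction into all of its users,
// duplicating the producer's computation into every consumer, when cost
// analysis predicts the merged form is not slower and fits emitter budgets.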
class FusionInstructionMerger {
public:
explicit FusionInstructionMerger(
HloComputation* computation, const se::DeviceDescription& gpu_device_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function)
: computation_(computation),
shape_size_function_(shape_size_function),
gpu_device_info_(gpu_device_info),
dump_fusion_visualization_(computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {}
absl::Status Run();
bool changed() const { return changed_; }
private:
FusionDecision ShouldFuse(HloInstruction* producer);
absl::Status FuseIntoAllUsers(HloInstruction* producer);
HloComputation* computation_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
std::optional<GpuHloCostAnalysis> cost_analysis_;
FusionInfoCache fusion_info_cache_;
const se::DeviceDescription& gpu_device_info_;
bool changed_ = false;
bool dump_fusion_visualization_ = false;
int total_visited_ = 0;
int total_merged_ = 0;
int num_fail_no_users_ = 0;
int num_fail_not_loop_fusion_ = 0;
int num_fail_merge_all_users_ = 0;
int num_fail_inefficient_fusion_emitter_ = 0;
int num_fail_fusion_too_large_ = 0;
int num_fail_uncoalesced_read_ = 0;
int num_fail_slower_if_fused_ = 0;
FusionInstructionMerger(const FusionInstructionMerger&) = delete;
FusionInstructionMerger& operator=(const FusionInstructionMerger&) = delete;
};
absl::Status FusionInstructionMerger::FuseIntoAllUsers(
HloInstruction* producer) {
std::vector<HloInstruction*> users = producer->users();
for (HloInstruction* user : users) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*user,
producer);
}
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user));
HloInstruction* consumer = user;
if (consumer->opcode() != HloOpcode::kFusion) {
consumer = computation_->AddInstruction(HloInstruction::CreateFusion(
user->shape(), ChooseFusionKind(*producer, *user), user));
TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer));
}
consumer->MergeFusionInstruction(producer);
TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer));
fusion_info_cache_.Invalidate(consumer);
if (dump_fusion_visualization_) {
RegisterFusionState(*computation_,
absl::StrCat("Fused |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*consumer);
}
changed_ = true;
}
CHECK_EQ(0, producer->user_count()) << producer->ToString();
TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer));
fusion_info_cache_.Invalidate(producer);
VLOG(2) << "Merged fusion instruction: " << producer->name()
<< " into users { "
<< absl::StrJoin(users, ", ",
[](std::string* out, HloInstruction* user) {
absl::StrAppend(out, user->name());
})
<< " }";
return absl::OkStatus();
}
absl::Status FusionInstructionMerger::Run() {
for (HloInstruction* producer : computation_->MakeInstructionPostOrder()) {
if (producer->opcode() != HloOpcode::kFusion) {
continue;
}
FusionDecision should_fuse = ShouldFuse(producer);
if (should_fuse) {
TF_RETURN_IF_ERROR(FuseIntoAllUsers(producer));
++total_merged_;
} else {
VLOG(3) << "Not fusing fusion |" << producer->name()
<< "| with all of it's users due to: " << should_fuse.Explain();
if (dump_fusion_visualization_ && !producer->users().empty()) {
RegisterFusionState(
*computation_,
absl::StrCat(
"Not fusing fusion |", producer->name(),
"| into all of its users due to: ", should_fuse.Explain()),
*producer->users()[0],
producer);
}
}
}
VLOG(1) << "FusionInstructionMerger EXIT"
<< " computation: " << computation_->name()
<< " total_visited: " << total_visited_
<< " total_merged: " << total_merged_ << " merge failures { "
<< " no_users: " << num_fail_no_users_
<< " not_loop_fusion: " << num_fail_not_loop_fusion_
<< " merge_all_users: " << num_fail_merge_all_users_
<< " uncoalesced_read: " << num_fail_uncoalesced_read_
<< " inefficient_fusion_emitter: "
<< num_fail_inefficient_fusion_emitter_
<< " slower_if_fused: " << num_fail_slower_if_fused_
<< " fusion_too_large: " << num_fail_fusion_too_large_ << " }";
return absl::OkStatus();
}
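// Returns true if at least half of the fusion's output-sized data flows
// through physically transposing instructions, which would make fused reads
// uncoalesced.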
bool TransposesMostData(const HloInstruction& fusion) {
float score = 0;
for (const HloInstruction* instr : fusion.fused_instructions()) {
if (IsPhysicallyTransposing(*instr)) {
score += 1.0 * ShapeUtil::ElementsInRecursive(instr->shape()) /
ShapeUtil::ElementsInRecursive(fusion.shape());
if (score >= 0.5) {
        VLOG(3) << fusion.ToString() << " transpose ratio " << score
                << " reached the 0.5 threshold";
return true;
}
}
}
return false;
}
FusionDecision FusionInstructionMerger::ShouldFuse(HloInstruction* producer) {
++total_visited_;
VLOG(4) << "Considering producer " << producer->name();
if (producer->users().empty()) {
++num_fail_no_users_;
return "fusion has no users";
}
if (!producer->IsLoopFusion()) {
++num_fail_not_loop_fusion_;
return "not a loop fusion";
}
auto producer_hero = GetRealHeroForMultiOutputFusion(*producer);
bool has_reduction_user = false;
for (const HloInstruction* user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
++num_fail_merge_all_users_;
return "not fusing bitcast ops";
}
if (user->IsCustomFusion()) {
++num_fail_merge_all_users_;
return "not fusing custom fusions";
}
auto consumer_hero = GetRealHeroForMultiOutputFusion(*user);
if (auto compatible =
FusionHeroesAreCompatible(producer_hero, consumer_hero);
!compatible) {
return compatible;
}
FusionDecision fusible = IsProducerConsumerFusible(*producer, *user);
if (!fusible) {
++num_fail_merge_all_users_;
VLOG(9) << user->ToString();
return fusible;
}
if (IsInputFusibleReduction(*user)) {
has_reduction_user = true;
}
}
if (has_reduction_user && TransposesMostData(*producer)) {
++num_fail_uncoalesced_read_;
return "would read mostly uncoalesced";
}
for (const HloInstruction* user : producer->users()) {
FusionDecision fits = FusionFitsInBudget(
*user, *producer, gpu_device_info_,
true, &fusion_info_cache_);
if (!fits) {
++num_fail_fusion_too_large_;
return fits;
}
}
if (!cost_analysis_) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
cost_analysis_.emplace(
GpuHloCostAnalysis::Options{shape_size_function_,
{},
true},
&gpu_device_info_);
TF_CHECK_OK(computation_->Accept(&cost_analysis_.value()));
}
for (const HloInstruction* user : producer->users()) {
if (cost_analysis_->ProducerConsumerMergedTooLarge(*producer, *user)) {
++num_fail_inefficient_fusion_emitter_;
return FusionDecision{} << "if merged with " << user->name()
<< " will generate huge IR";
}
}
GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
producer, &*cost_analysis_, GpuPerformanceModelOptions::Default(),
producer->users());
if (t.time_fused > t.time_unfused) {
++num_fail_slower_if_fused_;
return "will execute slower if fused";
}
return {};
}
absl::StatusOr<bool> FusionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(1) << "FusionMerger for module: " << module->name();
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
VLOG(9) << "Before running FusionInstructionMerger for computation: "
<< computation->name();
XLA_VLOG_LINES(9, computation->ToString());
FusionInstructionMerger fusion_merger(computation, gpu_device_info_,
shape_size_function_);
TF_RETURN_IF_ERROR(fusion_merger.Run());
changed |= fusion_merger.changed();
VLOG(9) << "After running FusionInstructionMerger for computation: "
<< computation->name() << " changed: " << changed;
XLA_VLOG_LINES(9, computation->ToString());
}
return changed;
}
}
} | #include "xla/service/gpu/fusion_merger.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class FusionMergerTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
FusionMerger fusion_merger_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
FusionMergerTest() : HloTestBase() {}
};
TEST_F(FusionMergerTest, MergeSharedFusionInstruction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
comp.3 {
constant.param_0 = f32[4]{0} parameter(0)
param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1)
get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0
ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6)
}
comp.2 {
param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1
get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2
ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY MergeSharedFusionInstruction.Computation0 {
constant = f32[4]{0} constant({1, 1, 1, 1})
param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3
fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2
fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1
fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, root->opcode());
auto* operand0 = root->operand(0);
EXPECT_EQ(HloOpcode::kFusion, operand0->opcode());
EXPECT_EQ(4, operand0->fused_instruction_count());
auto* operand1 = root->operand(1);
EXPECT_EQ(HloOpcode::kFusion, operand1->opcode());
EXPECT_EQ(7, operand1->fused_instruction_count());
auto* operand2 = root->operand(2);
EXPECT_EQ(HloOpcode::kFusion, operand2->opcode());
EXPECT_EQ(7, operand2->fused_instruction_count());
}
TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT _ = f32[] add(x, y)
}
comp0 {
p = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
gte0 = f32[100000000] get-tuple-element(p), index=0
gte1 = f32[100000000] get-tuple-element(p), index=1
add.9 = f32[100000000] add(gte0, gte1)
gte2 = f32[100000000] get-tuple-element(p), index=2
add.10 = f32[100000000] add(add.9, gte2)
gte3 = f32[100000000] get-tuple-element(p), index=3
add.11 = f32[100000000] add(add.10, gte3)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
gte4 = f32[100000000] get-tuple-element(p1), index=0
gte5 = f32[100000000] get-tuple-element(p1), index=1
add.12 = f32[100000000] add(gte4, gte5)
gte6 = f32[100000000] get-tuple-element(p1), index=2
add.13 = f32[100000000] add(add.12, gte6)
gte7 = f32[100000000] get-tuple-element(p1), index=3
add.14 = f32[100000000] add(add.13, gte7)
ROOT r = f32[100000000] add(add.14, add.11)
}
comp1 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
}
comp2 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
ROOT n = f32[] negate(r)
}
ENTRY m.Computation2 {
p0 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
fusion.0 = f32[100000000] fusion(p0, p1), kind=kLoop, calls=comp0
fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1
fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2
ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, LessMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
comp.2 {
state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0
get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1
add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6)
get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2
ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY m.Computation2 {
constant = f32[4]{0} constant({1, 1, 1, 1})
state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2
fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1
fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeIntoInputFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[32]{0} parameter(0)
ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[32]{0} parameter(0)
f2_mul = f32[32]{0} multiply(f2_p0, f2_p0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[32]{0} parameter(0)
f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())));
}
TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_matmul.36
max (parameter.13: f32[], parameter.14: f32[]) -> f32[] {
parameter.13 = f32[] parameter(0)
parameter.14 = f32[] parameter(1)
ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14)
}
add (parameter.29: f32[], parameter.30: f32[]) -> f32[] {
parameter.29 = f32[] parameter(0)
parameter.30 = f32[] parameter(1)
ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30)
}
fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] {
param_1.4 = f32[200,200,200]{2,1,0} parameter(0)
param_2.1 = f32[200,200]{1,0} parameter(1)
broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2}
subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3)
exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0)
constant.27 = f32[] constant(0)
ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add
}
fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] {
param_1.9 = f32[200,200]{1,0} parameter(1)
broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1}
param_0.7 = f32[200,200]{1,0} parameter(0)
broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2}
ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8)
}
ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] {
parameter.2 = f32[200,200]{1,0} parameter(1)
parameter.1 = f32[200,200]{1,0} parameter(0)
fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3
constant.11 = f32[] constant(-inf)
reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max
ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter())));
}
TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0)
ROOT f1_root = f32[16,16,256]{2,1,0} copy(add)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]}
f1_copy = f32[5,16,256]{2,1,0} copy(slice1)
slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]}
bitcast = f32[11,16,256]{2,1,0} bitcast(slice2)
ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0}
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
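// Merging here would give the consumer fusion more than
// MaxOperandsAndOutputsPerFusion() operands, so the merger must back off.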
TEST_F(FusionMergerTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> entry_params;
for (int64_t i = 0; i < kNumParams; ++i) {
entry_params.push_back(
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
}
auto make_fusion = [&](absl::Span<HloInstruction* const> params) {
HloComputation::Builder sub_builder("subcomp");
HloInstruction* sum = nullptr;
for (int64_t i = 0; i < params.size(); ++i) {
auto p = sub_builder.AddInstruction(
HloInstruction::CreateParameter(i, shape, "p"));
if (sum == nullptr) {
sum = p;
} else {
sum = sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, p));
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(sub_builder.Build());
return HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, params, subcomp);
};
auto fusion = b.AddInstruction(
make_fusion(absl::MakeSpan(entry_params)
.subspan(0, MaxOperandsAndOutputsPerFusion())));
b.AddInstruction(make_fusion({entry_params.back(), fusion}));
module->AddEntryComputation(b.Build());
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
Arg_0.5 = f32[200000] parameter(0)
slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]}
slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]}
add.9 = f32[100000] add(slice.7, slice.8)
slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]}
slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]}
add.12 = f32[50000] add(slice.10, slice.11)
slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]}
slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]}
add.15 = f32[25000] add(slice.13, slice.14)
slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]}
slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]}
add.18 = f32[12500] add(slice.16, slice.17)
slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]}
slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]}
add.21 = f32[6250] add(slice.19, slice.20)
slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]}
slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]}
ROOT add.24 = f32[3125] add(slice.22, slice.23)
}
f2 {
Arg_0 = f32[3125] parameter(0)
slice.25 = f32[1562] slice(Arg_0), slice={[0:3124:2]}
slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]}
add.27 = f32[1562] add(slice.25, slice.26)
slice.28 = f32[781] slice(add.27), slice={[0:1561:2]}
slice.29 = f32[781] slice(add.27), slice={[1:1562:2]}
add.30 = f32[781] add(slice.28, slice.29)
slice.31 = f32[390] slice(add.30), slice={[0:780:2]}
slice.32 = f32[390] slice(add.30), slice={[1:781:2]}
add.33 = f32[390] add(slice.31, slice.32)
slice.34 = f32[195] slice(add.33), slice={[0:389:2]}
slice.35 = f32[195] slice(add.33), slice={[1:390:2]}
add.36 = f32[195] add(slice.34, slice.35)
slice.37 = f32[97] slice(add.36), slice={[0:194:2]}
slice.38 = f32[97] slice(add.36), slice={[1:195:2]}
add.39 = f32[97] add(slice.37, slice.38)
slice.40 = f32[48] slice(add.39), slice={[0:96:2]}
slice.41 = f32[48] slice(add.39), slice={[1:97:2]}
ROOT add.42 = f32[48] add(slice.40, slice.41)
}
ENTRY e {
p0 = f32[200000] parameter(0)
f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1
ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
p01 = s8[1000000] parameter(0)
ROOT s0 = s8[10] slice(p01), slice={[0:10]}
}
f2 {
p02 = s8[10] parameter(0)
ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0}
}
ENTRY e {
p0 = s8[1000000] parameter(0)
f1 = s8[10] fusion(p0), kind=kLoop, calls=f1
ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_a (p: f32[]) -> f32[1024,1024,1024] {
%p = f32[] parameter(0)
%b = f32[1024,1024,1024] broadcast(%p), dimensions={}
ROOT %t = f32[1024,1024,1024] tanh(%b)
}
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
ENTRY entry {
p0 = f32[] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a
f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b
f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
ROOT f4 = f32[1024,1024,1024] add(f2, f3)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] add(%p, %p)
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b {
%p = f32[1024,1024,1024] parameter(0)
%t1 = f32[1024,1024,1024] tanh(%p)
%t2 = f32[1024,1024,1024] tanh(%t1)
%t3 = f32[1024,1024,1024] tanh(%t2)
%t4 = f32[1024,1024,1024] tanh(%t3)
%t5 = f32[1024,1024,1024] tanh(%t4)
%t6 = f32[1024,1024,1024] tanh(%t5)
%t7 = f32[1024,1024,1024] tanh(%t6)
%t8 = f32[1024,1024,1024] tanh(%t7)
ROOT %t9 = f32[1024,1024,1024] tanh(%t8)
}
%f_c {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2}
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, NoMergeWithBitcast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.103 {
param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.420 = f32[8,512]{1,0} parameter(1)
bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420)
convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144)
bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252)
broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2}
divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481)
ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15)
}
fused_computation.105 {
param_1.426 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426)
transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2}
param_0.315 = f16[8,512]{1,0} parameter(0)
broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2}
subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482)
ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22)
}
fused_computation.104 {
param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0)
convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000)
constant_752 = f32[] constant(-0)
ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752),
dimensions={1}, to_apply=f32add
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105
bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105)
fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104
ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.45 {
param_1.194 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194)
param_0.135 = f16[8,512]{1,0} parameter(0)
broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2}
subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391)
ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6)
}
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.44 {
param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869)
transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2}
bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212)
constant_429 = f32[] constant(0)
ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add
}
fused_computation.43 {
param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.188 = f32[8,512]{1,0} parameter(1)
bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188)
convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040)
bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220)
broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2}
divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390)
ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11)
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45
fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44
ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43
}
)")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
add_float_.56 {
x.57 = f32[] parameter(0)
y.58 = f32[] parameter(1)
ROOT add.59 = f32[] add(x.57, y.58)
}
fused_computation.66 {
constant.635 = f32[] constant(0)
broadcast.257 = f32[459,3]{1,0} broadcast(constant.635), dimensions={}
constant.641 = f32[] constant(1)
broadcast.256 = f32[459,3]{1,0} broadcast(constant.641), dimensions={}
broadcast.255 = f32[459]{0} broadcast(constant.635), dimensions={}
iota.28 = f32[459]{0} iota(), iota_dimension=0
constant.629 = f32[] constant(1.49891067)
broadcast.253 = f32[459]{0} broadcast(constant.629), dimensions={}
multiply.39 = f32[459]{0} multiply(iota.28, broadcast.253)
constant.633 = f32[] constant(-1)
broadcast.252 = f32[459]{0} broadcast(constant.633), dimensions={}
add.31 = f32[459]{0} add(multiply.39, broadcast.252)
ceil.11 = f32[459]{0} ceil(add.31)
constant.630 = f32[] constant(685)
broadcast.251 = f32[459]{0} broadcast(constant.630), dimensions={}
clamp.49 = f32[459]{0} clamp(broadcast.255, ceil.11, broadcast.251)
subtract.11 = f32[459]{0} subtract(clamp.49, multiply.39)
broadcast.249 = f32[459,3]{1,0} broadcast(subtract.11), dimensions={0}
iota.26 = f32[459,3]{1,0} iota(), iota_dimension=1
add.30 = f32[459,3]{1,0} add(broadcast.249, iota.26)
abs.3 = f32[459,3]{1,0} abs(add.30)
subtract.10 = f32[459,3]{1,0} subtract(broadcast.256, abs.3)
maximum.6 = f32[459,3]{1,0} maximum(broadcast.257, subtract.10)
ROOT reduce.3 = f32[459]{0} reduce(maximum.6, constant.635), dimensions={1}, to_apply=add_float_.56
}
fused_computation.67 {
constant.684 = f32[] constant(0)
broadcast.296 = f32[1130,3]{1,0} broadcast(constant.684), dimensions={}
constant.685 = f32[] constant(1)
broadcast.295 = f32[1130,3]{1,0} broadcast(constant.685), dimensions={}
broadcast.294 = f32[1130]{0} broadcast(constant.684), dimensions={}
iota.41 = f32[1130]{0} iota(), iota_dimension=0
constant.675 = f32[] constant(1.34513271)
broadcast.293 = f32[1130]{0} broadcast(constant.675), dimensions={}
multiply.47 = f32[1130]{0} multiply(iota.41, broadcast.293)
constant.677 = f32[] constant(-1)
broadcast.290 = f32[1130]{0} broadcast(constant.677), dimensions={}
add.39 = f32[1130]{0} add(multiply.47, broadcast.290)
ceil.15 = f32[1130]{0} ceil(add.39)
constant.676 = f32[] constant(1517)
broadcast.289 = f32[1130]{0} broadcast(constant.676), dimensions={}
clamp.53 = f32[1130]{0} clamp(broadcast.294, ceil.15, broadcast.289)
subtract.19 = f32[1130]{0} subtract(clamp.53, multiply.47)
broadcast.287 = f32[1130,3]{1,0} broadcast(subtract.19), dimensions={0}
iota.39 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.38 = f32[1130,3]{1,0} add(broadcast.287, iota.39)
abs.7 = f32[1130,3]{1,0} abs(add.38)
subtract.18 = f32[1130,3]{1,0} subtract(broadcast.295, abs.7)
maximum.10 = f32[1130,3]{1,0} maximum(broadcast.296, subtract.18)
ROOT reduce.4 = f32[1130]{0} reduce(maximum.10, constant.684), dimensions={1}, to_apply=add_float_.56
}
fused_computation.59 {
constant.532 = f32[] constant(0)
broadcast.316 = f32[1130,3]{1,0} broadcast(constant.532), dimensions={}
constant.663 = f32[] constant(1)
broadcast.315 = f32[1130,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.314 = f32[1130]{0} broadcast(constant.532), dimensions={}
iota.47 = f32[1130]{0} iota(), iota_dimension=0
constant.579 = f32[] constant(1.34513271)
broadcast.311 = f32[1130]{0} broadcast(constant.579), dimensions={}
multiply.51 = f32[1130]{0} multiply(iota.47, broadcast.311)
constant.578 = f32[] constant(-1)
broadcast.310 = f32[1130]{0} broadcast(constant.578), dimensions={}
add.43 = f32[1130]{0} add(multiply.51, broadcast.310)
ceil.17 = f32[1130]{0} ceil(add.43)
constant.576 = f32[] constant(1517)
broadcast.309 = f32[1130]{0} broadcast(constant.576), dimensions={}
clamp.55 = f32[1130]{0} clamp(broadcast.314, ceil.17, broadcast.309)
subtract.24 = f32[1130]{0} subtract(clamp.55, multiply.51)
broadcast.306 = f32[1130,3]{1,0} broadcast(subtract.24), dimensions={0}
iota.45 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.42 = f32[1130,3]{1,0} add(broadcast.306, iota.45)
abs.9 = f32[1130,3]{1,0} abs(add.42)
subtract.23 = f32[1130,3]{1,0} subtract(broadcast.315, abs.9)
maximum.12 = f32[1130,3]{1,0} maximum(broadcast.316, subtract.23)
param_2.183 = f32[1130]{0} parameter(2)
broadcast.172 = f32[1130,3]{1,0} broadcast(param_2.183), dimensions={0}
divide.3 = f32[1130,3]{1,0} divide(maximum.12, broadcast.172)
bitcast.53 = f32[3390]{0} bitcast(divide.3)
broadcast.171 = f32[3390,1377]{1,0} broadcast(bitcast.53), dimensions={0}
broadcast.276 = f32[459,3]{1,0} broadcast(constant.532), dimensions={}
broadcast.275 = f32[459,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.274 = f32[459]{0} broadcast(constant.532), dimensions={}
iota.35 = f32[459]{0} iota(), iota_dimension=0
constant.614 = f32[] constant(1.49891067)
broadcast.273 = f32[459]{0} broadcast(constant.614), dimensions={}
multiply.43 = f32[459]{0} multiply(iota.35, broadcast.273)
broadcast.272 = f32[459]{0} broadcast(constant.578), dimensions={}
add.35 = f32[459]{0} add(multiply.43, broadcast.272)
ceil.13 = f32[459]{0} ceil(add.35)
constant.611 = f32[] constant(685)
broadcast.269 = f32[459]{0} broadcast(constant.611), dimensions={}
clamp.51 = f32[459]{0} clamp(broadcast.274, ceil.13, broadcast.269)
subtract.15 = f32[459]{0} subtract(clamp.51, multiply.43)
broadcast.267 = f32[459,3]{1,0} broadcast(subtract.15), dimensions={0}
iota.33 = f32[459,3]{1,0} iota(), iota_dimension=1
add.34 = f32[459,3]{1,0} add(broadcast.267, iota.33)
abs.5 = f32[459,3]{1,0} abs(add.34)
subtract.14 = f32[459,3]{1,0} subtract(broadcast.275, abs.5)
maximum.8 = f32[459,3]{1,0} maximum(broadcast.276, subtract.14)
param_1.177 = f32[459]{0} parameter(1)
broadcast.170 = f32[459,3]{1,0} broadcast(param_1.177), dimensions={0}
divide.2 = f32[459,3]{1,0} divide(maximum.8, broadcast.170)
bitcast.52 = f32[1377]{0} bitcast(divide.2)
broadcast.169 = f32[3390,1377]{1,0} broadcast(bitcast.52), dimensions={1}
multiply.15 = f32[3390,1377]{1,0} multiply(broadcast.171, broadcast.169)
bitcast.61 = f32[1130,3,459,3]{3,2,1,0} bitcast(multiply.15)
transpose.68 = f32[459,1130,3,3]{2,0,3,1} transpose(bitcast.61), dimensions={2,0,3,1}
copy.1 = f
#ifndef XLA_SERVICE_GPU_CUDNN_SIMPLIFY_PADDING_H_
#define XLA_SERVICE_GPU_CUDNN_SIMPLIFY_PADDING_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
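// Simplifies the pad(slice(conv)) patterns left behind after convolutions are
// padded and vectorized for cuDNN: when the output features removed by the
// slice are known to be zero, the slice is folded into the pad.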
class CudnnSimplifyPadding : public HloModulePass {
public:
CudnnSimplifyPadding() = default;
absl::string_view name() const override { return "cudnn_simplify_padding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/cudnn_simplify_padding.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
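// Returns the index of the single `false` element of `vals`, or nullopt if
// there is no false element or there is more than one.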
std::optional<int64_t> FindFalseIndex(absl::Span<const bool> vals) {
std::optional<int64_t> missing_dim;
for (int i = 0; i < vals.size(); i++) {
if (vals[i]) {
continue;
}
if (missing_dim.has_value()) {
VLOG(2) << "Multiple dimensions are missing from conv dnums; can't "
"determine which is vect_c dimension";
return std::nullopt;
}
missing_dim = i;
}
return missing_dim;
}
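// Finds the vect_c dimension of the conv's output: the one output dimension
// not claimed by the batch, feature, or spatial dimension numbers.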
std::optional<int64_t> FindOutputVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->shape().tuple_shapes(0).dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.output_batch_dimension()] = true;
seen_dims[dnums.output_feature_dimension()] = true;
for (int64_t d : dnums.output_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
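// Same as above, but for the conv's kernel (operand 1).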
std::optional<int64_t> FindKernelVectCDim(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t num_dims = conv->operand(1)->shape().dimensions_size();
absl::InlinedVector<bool, 5> seen_dims(num_dims);
seen_dims[dnums.kernel_input_feature_dimension()] = true;
seen_dims[dnums.kernel_output_feature_dimension()] = true;
for (int64_t d : dnums.kernel_spatial_dimensions()) {
seen_dims[d] = true;
}
return FindFalseIndex(seen_dims);
}
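// Returns the number of trailing output features of the conv's filter that
// are known to be all zero (e.g. because the filter was zero-padded, or is a
// constant with a zero tail), or nullopt if this cannot be determined.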
std::optional<int64_t> NumTrailingZeroOutputFeatures(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
int64_t feature_dim = dnums.kernel_output_feature_dimension();
const HloInstruction* weights = conv->operand(1);
auto backend_config = conv->backend_config<GpuBackendConfig>();
if (backend_config.ok() &&
backend_config->cudnn_conv_backend_config().reordered_int8_nchw_vect()) {
VLOG(2) << "Matched int8x32 convolution with filter reordering";
const HloInstruction *reshape, *transpose;
bool matched =
Match(weights, m::Reshape(m::Transpose(
&transpose, m::Reshape(&reshape, m::Op(&weights)))));
if (!matched || feature_dim != 0 || transpose->shape().rank() != 8) {
VLOG(2) << "The filter output feature dimension cannot be determined, as "
"the reordering sequence is modified";
return std::nullopt;
}
const auto& transpose_dimensions =
Cast<HloTransposeInstruction>(transpose)->dimensions();
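    // Best-effort reading: accumulate dimension sizes of the un-reordered
    // weights until they cover the elements preceding the reordered
    // output-feature split, recovering the output feature dimension.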
int64_t preceding_size = 1;
for (int64_t i = transpose_dimensions.at(3) - 1; i >= 0; --i) {
preceding_size *= reshape->shape().dimensions(i);
}
int64_t accumulated_size = 1;
for (int64_t size : weights->shape().dimensions()) {
if (accumulated_size < preceding_size) {
accumulated_size *= size;
++feature_dim;
} else {
break;
}
}
if (accumulated_size != preceding_size) {
VLOG(2) << "Something is really wrong here, I give up";
return std::nullopt;
}
VLOG(2) << "Computed output feature dimension: " << feature_dim;
}
VLOG(2) << "Computing NumTrailingZeroOutputFeatures of " << conv->ToString()
<< "\nwith weights " << weights->ToString();
if (Match(weights, m::Pad(m::Op(), m::ConstantEffectiveScalar(0)))) {
const PaddingConfig::PaddingConfigDimension& padding_config =
weights->padding_config().dimensions(feature_dim);
VLOG(2) << "Success: Weights is a pad; padding on output feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (const HloInstruction * pad; Match(
weights, m::Reshape(m::Pad(&pad, m::Op(),
m::ConstantEffectiveScalar(0))))) {
std::optional<int64_t> vect_c_dim = FindKernelVectCDim(conv);
if (!vect_c_dim.has_value()) {
VLOG(2) << "fail: Can't find vect_c dimension in conv.";
return std::nullopt;
}
if (*vect_c_dim != dnums.kernel_input_feature_dimension() + 1) {
VLOG(2) << "fail: vect_c dim is in the wrong place; should be right "
"after kernel input feature dims in conv.";
return std::nullopt;
}
absl::InlinedVector<int64_t, 5> expected_pad_dim_sizes(
weights->shape().dimensions().begin(),
weights->shape().dimensions().end());
expected_pad_dim_sizes[dnums.kernel_input_feature_dimension()] *=
weights->shape().dimensions(*vect_c_dim);
expected_pad_dim_sizes.erase(expected_pad_dim_sizes.begin() + *vect_c_dim);
if (pad->shape().dimensions() != expected_pad_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't simply merge vect_c dimension into "
"input features dim "
<< weights->ToString() << " but expected dims "
<< absl::StrJoin(expected_pad_dim_sizes, ",");
return std::nullopt;
}
int64_t feature_dim_before_reshape = feature_dim;
if (dnums.kernel_output_feature_dimension() >
dnums.kernel_input_feature_dimension()) {
feature_dim_before_reshape--;
}
const PaddingConfig::PaddingConfigDimension& padding_config =
pad->padding_config().dimensions(feature_dim_before_reshape);
VLOG(2) << "Success: Weights is a reshape of a pad; padding on output "
"feature dim is "
<< padding_config.edge_padding_high();
return padding_config.edge_padding_high();
} else if (Match(weights, m::Constant())) {
const Literal& lit = weights->literal();
const auto& dims = weights->shape().dimensions();
absl::InlinedVector<int64_t, 5> multi_index;
for (int64_t dim : dims) {
multi_index.push_back(dim - 1);
}
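    // Odometer-style decrement that varies the feature dimension slowest, so
    // the scan below visits every element at a given feature index before
    // moving on to a lower feature index.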
auto decrement_multi_index = [&] {
for (int i = 0; i < multi_index.size(); ++i) {
if (i != feature_dim) {
int64_t& idx = multi_index[i];
--idx;
if (idx == -1) {
idx = dims[i] - 1;
} else {
return true;
}
}
}
int64_t& idx = multi_index[feature_dim];
--idx;
return idx != -1;
};
do {
if (!lit.IsZero(multi_index)) {
break;
}
} while (decrement_multi_index());
int64_t first_trailing_zero_feature = multi_index[feature_dim] + 1;
if (first_trailing_zero_feature == 0) {
VLOG(2) << "Weights constant is entirely zero.";
} else {
VLOG(2) << "First nonzero index in weights constant is "
<< absl::StrJoin(multi_index, ",");
}
int64_t ret =
std::max<int64_t>(0, weights->shape().dimensions(feature_dim) -
first_trailing_zero_feature);
VLOG(2) << "Success: weights is a constant; num zero trailing output "
"features is "
<< ret;
return ret;
}
return std::nullopt;
}
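// Matches pad(slice(gte(conv))) -- optionally with a reshape, or a transpose
// plus reshape, between the conv and the slice -- and folds the slice into
// the pad when the sliced-off output features are known to be zero.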
absl::StatusOr<bool> TrySimplifyPadding(HloInstruction* instr) {
HloInstruction* conv;
HloInstruction* transpose = nullptr;
HloInstruction* reshape = nullptr;
HloInstruction* slice;
HloInstruction* pad;
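  // The conv may feed the slice directly, via a reshape, or via a transpose
  // followed by a reshape (the vectorized-layout cases).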
auto conv_matcher = m::GetTupleElement(
m::CustomCall(&conv).WithPredicate([](const HloInstruction* instr) {
return instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget;
}),
0);
auto pad_matcher = m::Pad(m::Op(), m::ConstantEffectiveScalar(0));
if (!MatchAndLogIfFailed(instr, "conv-slice-pad",
m::Pad(&pad, m::Slice(&slice, conv_matcher),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-reshape-slice-pad",
m::Pad(&pad, m::Slice(&slice, m::Reshape(&reshape, conv_matcher)),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher) &&
!MatchAndLogIfFailed(
instr, "conv-transpose-reshape-slice-pad",
m::Pad(&pad,
m::Slice(&slice,
m::Reshape(&reshape,
m::Transpose(&transpose, conv_matcher))),
m::ConstantEffectiveScalar(0)),
VLOG_IS_ON(3), pad_matcher)) {
return false;
}
VLOG(2) << "Found pattern to attempt to simplify:\n"
<< "conv: " << conv->ToString()
<< "\ntranspose: "
<< (transpose != nullptr ? transpose->ToString() : "(null)")
<< "\nreshape: "
<< (reshape != nullptr ? reshape->ToString() : "(null)")
<< "\nslice: " << slice->ToString()
<< "\npad: " << pad->ToString();
std::optional<int64_t> num_known_zero_output_features =
NumTrailingZeroOutputFeatures(conv);
if (!num_known_zero_output_features.has_value() ||
*num_known_zero_output_features == 0) {
VLOG(2) << "fail: Didn't find any known-zero output features";
return false;
}
const auto& dnums = conv->convolution_dimension_numbers();
int64_t output_feature_dim;
if (reshape == nullptr) {
CHECK_EQ(transpose, nullptr);
output_feature_dim = dnums.output_feature_dimension();
} else {
std::optional<int64_t> vect_c_dim_before_transpose =
FindOutputVectCDim(conv);
if (!vect_c_dim_before_transpose.has_value()) {
VLOG(2) << "Couldn't find vect_c output dim in conv.";
return false;
}
int64_t feature_dim_after_transpose;
int64_t vect_c_dim_after_transpose;
if (transpose == nullptr) {
feature_dim_after_transpose = dnums.output_feature_dimension();
vect_c_dim_after_transpose = *vect_c_dim_before_transpose;
} else {
const auto& transpose_dims = transpose->dimensions();
feature_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, dnums.output_feature_dimension()));
vect_c_dim_after_transpose = std::distance(
transpose->dimensions().begin(),
absl::c_find(transpose_dims, *vect_c_dim_before_transpose));
}
if (vect_c_dim_after_transpose != feature_dim_after_transpose + 1) {
VLOG(2) << "fail: after transpose (if present), vect_c dim must appear "
"immediately after output feature dim: Computed "
"vect_d_dim_after_transpose to be "
<< vect_c_dim_after_transpose;
return false;
}
absl::InlinedVector<int64_t, 5> expected_reshape_dim_sizes(
reshape->operand(0)->shape().dimensions().begin(),
reshape->operand(0)->shape().dimensions().end());
expected_reshape_dim_sizes[feature_dim_after_transpose] *=
expected_reshape_dim_sizes[vect_c_dim_after_transpose];
expected_reshape_dim_sizes.erase(expected_reshape_dim_sizes.begin() +
vect_c_dim_after_transpose);
if (reshape->shape().dimensions() != expected_reshape_dim_sizes) {
VLOG(2) << "fail: Reshape doesn't merge vect_c with feature dimension.";
return false;
}
output_feature_dim = feature_dim_after_transpose;
}
if (!absl::c_all_of(slice->slice_starts(), [](auto v) { return v == 0; }) ||
!absl::c_all_of(slice->slice_strides(), [](auto v) { return v == 1; })) {
VLOG(2) << "fail: Slice doesn't start at the front or has stride != 1.";
return false;
}
for (int64_t dim = 0; dim < slice->slice_limits().size(); dim++) {
if (slice->slice_starts(dim) != 0 || slice->slice_strides(dim) != 1 ||
(dim != output_feature_dim &&
slice->slice_limits(dim) !=
slice->operand(0)->shape().dimensions(dim))) {
VLOG(2) << "fail: Slice removes something other than the features dim.";
return false;
}
}
int64_t num_sliced_from_feature_dim =
slice->operand(0)->shape().dimensions(output_feature_dim) -
slice->slice_limits(output_feature_dim);
if (num_sliced_from_feature_dim > *num_known_zero_output_features) {
VLOG(2) << "fail: Slice removes " << num_sliced_from_feature_dim
<< " features from the conv, but only "
<< *num_known_zero_output_features
<< " features in the conv are known to be zero.";
return false;
}
if (pad->padding_config().dimensions(output_feature_dim).interior_padding() !=
0) {
VLOG(2)
<< "fail: Can't merge slice into pad because pad adds interior padding "
"in feature dimension.";
return false;
}
VLOG(1) << "Eliminating " << num_sliced_from_feature_dim
<< " elements of padding from conv " << conv->name();
PaddingConfig new_padding_config = pad->padding_config();
PaddingConfig::PaddingConfigDimension* new_pad_feature_dim =
new_padding_config.mutable_dimensions(output_feature_dim);
new_pad_feature_dim->set_edge_padding_high(
new_pad_feature_dim->edge_padding_high() - num_sliced_from_feature_dim);
TF_ASSIGN_OR_RETURN(HloInstruction * new_pad,
MakePadHlo(slice->mutable_operand(0),
pad->mutable_operand(1), new_padding_config));
TF_RETURN_IF_ERROR(pad->parent()->ReplaceInstruction(pad, new_pad));
return true;
}
}
absl::StatusOr<bool> CudnnSimplifyPadding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool c, TrySimplifyPadding(instr));
changed |= c;
}
}
return changed;
}
}
#include "xla/service/gpu/cudnn_simplify_padding.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/cudnn_pad_for_convolutions.h"
#include "xla/service/gpu/cudnn_vectorize_convolutions.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class CudnnSimplifyPaddingTest : public HloTestBase {
protected:
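  // Pads and vectorizes convolutions for the given compute capability, runs
  // CudnnSimplifyPadding, then cleans up with reshape-mover and algebraic
  // simplification; returns whether CudnnSimplifyPadding changed the module.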
absl::StatusOr<bool> RunEndToEnd(std::pair<int, int> compute_capability,
HloModule* module) {
se::CudaComputeCapability cc{compute_capability.first,
compute_capability.second};
TF_RETURN_IF_ERROR(
RunHloPass(CudnnPadForConvolutions(cc), module).status());
TF_RETURN_IF_ERROR(
RunHloPass(CudnnVectorizeConvolutions(
cc, se::dnn::VersionInfo{8, 3, 0}),
module)
.status());
VLOG(1) << "after vectorizing convs:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(CallInliner(), module).status());
VLOG(1) << "after inliner:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(TupleSimplifier(), module).status());
VLOG(1) << "after tuple simplifier:\n" << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
{
HloPassFix<HloPassPipeline> pipeline("reshape-mover and algsimp");
pipeline.AddPass<ReshapeMover>();
pipeline.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions());
TF_RETURN_IF_ERROR(RunHloPass(pipeline, module).status());
}
VLOG(1) << "after reshape mover + algsimp:\n" << module->ToString();
return changed;
}
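  // Runs only CudnnSimplifyPadding (followed by algebraic simplification as
  // cleanup), without the padding/vectorization pipeline.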
absl::StatusOr<bool> RunJustThisPass(HloModule* module) {
TF_ASSIGN_OR_RETURN(bool changed,
RunHloPass(CudnnSimplifyPadding(), module));
VLOG(1) << "after simplify_padding:\n" << module->ToString();
TF_RETURN_IF_ERROR(RunHloPass(HloPassFix<AlgebraicSimplifier>(
AlgebraicSimplifierOptions()),
module)
.status());
return changed;
}
};
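// Checks that `p` pads dimension `dim` by `padding_high` at the high end and
// pads nothing else.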
void ExpectOnlyPadsOneDim(int64_t dim, int64_t padding_high,
const PaddingConfig& p) {
SCOPED_TRACE(p.DebugString());
for (int i = 0; i < p.dimensions_size(); ++i) {
SCOPED_TRACE(absl::StrCat("dimension ", i));
EXPECT_EQ(p.dimensions(i).edge_padding_low(), 0);
if (i == dim) {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), padding_high);
} else {
EXPECT_EQ(p.dimensions(i).edge_padding_high(), 0);
}
}
}
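// Replaces the constant `instr` with a copy whose elements are recomputed by
// `value_fn`.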
template <typename NativeT>
void SetConstantValue(
HloInstruction* instr,
absl::FunctionRef<NativeT(absl::Span<const int64_t>, NativeT)> value_fn) {
Literal new_literal = instr->literal().Clone();
  new_literal.MutableEachCell<NativeT>(value_fn);
TF_EXPECT_OK(instr->parent()->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConstant(std::move(new_literal))));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEnd) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[10,20,30,190], u8[0]) custom-call(
s8[10,20,30,63] parameter(0), s8[3,5,63,190] parameter(1),
f32[10] parameter(2), s8[10,20,30,190] parameter(3)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
ROOT conv2 = (s8[10,20,30,29], u8[0]) custom-call(
conv1_result, s8[3,5,190,29] parameter(4),
f32[10] parameter(5), s8[10,20,30,29] parameter(6)),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBiasActivationForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::Slice(m::Reshape(m::GetTupleElement(m::CustomCall(
{"__cudnn$convBiasActivationForward"},
m::GetTupleElement(
m::CustomCall({"__cudnn$convBiasActivationForward"}), 0),
m::Op(), m::Op(), m::Op())))),
m::Op())));
}
TEST_F(CudnnSimplifyPaddingTest, EndToEndNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv1 = (s8[1,64,480,400], u8[0]) custom-call(
s8[1,112,480,400] parameter(0), s8[3,3,112,64] parameter(1),
f32[64] parameter(2)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
conv1_result = get-tuple-element(conv1), index=0
convert = f32[1,64,480,400] convert(conv1_result)
constant = f32[] constant(0.349002093)
broadcast = f32[1,64,480,400] broadcast(constant)
ROOT multiply = f32[1,64,480,400] multiply(convert, broadcast)
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_FALSE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(m::Multiply())));
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeights) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedWeightsNotPaddedEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_3
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,32,10,10], u8[0]) custom-call(
s8[10,2,32,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=bf?01_i?o01->bf?01,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_5x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNHWC) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[3,3,64,60] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
weights = s8[3,3,2,32,64] reshape(weights_p)
conv = (s8[10,10,10,2,32], u8[0]) custom-call(
s8[10,10,10,2,32] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f?_01i?o->b01f?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,60] slice(s8[10,10,10,64] reshape(conv_result)), slice={[0:10], [0:10], [0:10], [0:60]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root, GmockMatch(
m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedTransposedAndReshapedOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,32,64,3,3] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_i?o01->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Pad(
&pad,
m::Reshape(m::Transpose(m::GetTupleElement(m::CustomCall(), 0))),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(1, 2, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeight) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
if (dims[3] < 6) return 1;
return 0;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* pad = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0),
m::ConstantScalar(0))));
ExpectOnlyPadsOneDim(3, 1, pad->padding_config());
}
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeightIsNotLargeEnough) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(0),
s8[3,3,10,10] constant({...})
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
{
HloInstruction* weights = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall(
m::Op(), m::Constant(&weights)))),
m::Op())));
SetConstantValue<int8_t>(
weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t {
          if (dims[3] < 5) return 0;
return 1;
});
}
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ReshapeDoesntMergeVectCDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights_p
), window={size=3x3}, dim_labels=bf01?_io01?->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,32] reshape(weights_p)
conv = (s8[10,2,10,10,4,8], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01?->bf01??,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,4,8,10,10] transpose(conv_result), dimensions={0,1,4,5,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInKernel) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0
weights = s8[2,64,3,3,4,8] reshape(weights_p)
conv = (s8[10,2,10,10,32], u8[0]) custom-call(
s8[10,2,10,10,32] parameter(1),
weights
), window={size=3x3}, dim_labels=bf01?_io01??->bf01?,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3}
slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginning) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,9,10,6] slice(conv_result), slice={[0:10], [1:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginningOfFeatureDim) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,5] slice(conv_result), slice={[0:10], [0:10], [0:10], [1:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceHasStride) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,3] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6:2]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, PadAddsInteriorPadding) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5_1
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, SliceMoreElementsThanPad) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4
conv = (s8[10,10,10,10], u8[0]) custom-call(
s8[10,10,10,10] parameter(1),
weights
), window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
conv_result = get-tuple-element(conv), index=0
slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]}
ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_2
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* slice = nullptr;
ASSERT_THAT(root, GmockMatch(m::Slice(
&slice, m::GetTupleElement(m::CustomCall(), 0))));
for (int64_t i = 0; i < slice->shape().dimensions_size(); ++i) {
SCOPED_TRACE(i);
EXPECT_EQ(slice->slice_starts(i), 0);
EXPECT_EQ(slice->slice_strides(i), 1);
if (i != 3) {
EXPECT_EQ(slice->slice_limits(i), 10);
} else {
EXPECT_EQ(slice->slice_limits(i), 8);
}
}
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnNonTrivialConstants) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
      { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,1,12]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:1], [0:12]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,3,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x2_0x0_0
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, NoChangeOnComplexSlices) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
      { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,4]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [2:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_8
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, ScanOrderFeatureDimLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_outer
ENTRY main.26 {
reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0)
constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 } },
      { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
}, {
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } })
cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward"
get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0
slice.2 = f32[1,5,5,6]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [0:6]}
constant.0 = f32[] constant(0)
ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_6
}
)")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[63,112,3,3] parameter(1)),
window={size=3x3}, dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputLast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
conv.1 = (s8[1,63,80,80], u8[0]) custom-call(
s8[1,112,80,80] parameter(0), s8[3,3,112,63] parameter(1)),
window={size=3x3}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0
const.0 = s8[] constant(0)
ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get()));
EXPECT_TRUE(changed);
}
}
} | 2,069 |
#ifndef XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_
#define XLA_SERVICE_GPU_GPU_CONV_PADDING_LEGALIZATION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class GpuConvPaddingLegalization : public HloModulePass {
public:
absl::string_view name() const override {
return "gpu-conv-padding-legalization";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
bool CanonicalizeForwardConvolution(HloInstruction* conv);
bool CanonicalizeBackwardFilterConvolution(HloInstruction* backward_conv);
bool CanonicalizeBackwardInputConvolution(HloInstruction* backward_conv);
};
}
}
#endif
#include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsForwardConvolutionCanonical(const HloInstruction& conv) {
CHECK(conv.custom_call_target() == kCudnnConvForwardCallTarget ||
conv.custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget ||
conv.custom_call_target() == kCudnnConvForwardGraphCallTarget);
return window_util::HasSymmetricPadding(conv.window()) &&
!window_util::HasNegativePadding(conv.window()) &&
!window_util::HasDilation(conv.window());
}
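// If `conv_window` carries padding or base dilation that a cuDNN call cannot
// express (asymmetric or negative padding, or base dilation), applies it to
// `input` explicitly via kPad/kSlice instructions and resets it in
// `conv_window`; returns the possibly-replaced input operand.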
HloInstruction* MaybePaddedAndSlicedInput(
Window* conv_window, const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* input) {
HloComputation* computation = input->parent();
if (!window_util::HasSymmetricPadding(*conv_window) ||
window_util::HasBaseDilation(*conv_window)) {
PaddingConfig padding_config =
MakeNoPaddingConfig(input->shape().dimensions_size());
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
conv_window->dimensions(i).padding_low());
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
conv_window->dimensions(i).padding_high());
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
if (conv_window->dimensions(i).base_dilation() != 1) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window->dimensions(i).base_dilation() - 1);
conv_window->mutable_dimensions(i)->set_base_dilation(1);
}
}
PrimitiveType element_type = input->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
input =
MakePadHlo(input, padding, padding_config, &input->metadata()).value();
}
if (window_util::HasNegativePadding(*conv_window)) {
std::vector<int64_t> start_indices(input->shape().dimensions_size(), 0);
std::vector<int64_t> limit_indices(input->shape().dimensions().begin(),
input->shape().dimensions().end());
std::vector<int64_t> strides(input->shape().dimensions_size(), 1);
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() < 0) {
start_indices[dim] += -conv_window->dimensions(i).padding_low();
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() < 0) {
limit_indices[dim] -= -conv_window->dimensions(i).padding_high();
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
}
input = MakeSliceHlo(input, start_indices, limit_indices, strides).value();
}
return input;
}
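// If `conv_window` has window (kernel) dilation, materializes it by
// interior-padding the kernel with zeros; otherwise returns `kernel`
// unchanged. The caller resets the window dilation afterwards.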
HloInstruction* MaybePaddedKernel(const Window& conv_window,
const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* kernel) {
if (!window_util::HasWindowDilation(conv_window)) {
return kernel;
}
PaddingConfig padding_config;
for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) {
padding_config.add_dimensions();
}
for (size_t i = 0; i < conv_dnums.kernel_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.kernel_spatial_dimensions(i);
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window.dimensions(i).window_dilation() - 1);
}
HloComputation* computation = kernel->parent();
PrimitiveType element_type = kernel->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
return MakePadHlo(kernel, padding, padding_config, &kernel->metadata())
.value();
}
}
bool GpuConvPaddingLegalization::CanonicalizeForwardConvolution(
HloInstruction* conv) {
if (IsForwardConvolutionCanonical(*conv)) {
return false;
}
Window new_conv_window = conv->window();
HloInstruction* new_input = MaybePaddedAndSlicedInput(
&new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(0));
HloInstruction* new_kernel =
MaybePaddedKernel(new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(1));
for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) {
WindowDimension* dim = new_conv_window.mutable_dimensions(i);
dim->set_size(new_kernel->shape().dimensions(
conv->convolution_dimension_numbers().kernel_spatial_dimensions(i)));
dim->set_window_dilation(1);
}
VLOG(1) << "Canonicalizing forward conv";
std::vector<HloInstruction*> operands(conv->operands().begin(),
conv->operands().end());
operands[0] = new_input;
operands[1] = new_kernel;
auto new_conv = conv->parent()->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), operands));
new_conv->set_window(new_conv_window);
VLOG(1) << "Replacing:\n " << conv->ToString() << "\nwith:\n "
<< new_conv->ToString();
TF_CHECK_OK(conv->parent()->ReplaceInstruction(conv, new_conv));
return true;
}
namespace {
void IncreasePaddingLowBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_low(window_dim->padding_low() + delta);
}
void IncreasePaddingHighBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_high(window_dim->padding_high() + delta);
}
}
bool GpuConvPaddingLegalization::CanonicalizeBackwardFilterConvolution(
HloInstruction* backward_conv) {
CHECK_EQ(backward_conv->custom_call_target(),
kCudnnConvBackwardFilterCallTarget);
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
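  // cuDNN requires symmetric padding: keep min(padding_low, padding_high) on
  // the convolution window and move the asymmetric remainder onto the input
  // with an explicit kPad. Negative padding makes this pass bail out.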
HloInstruction* input = backward_conv->mutable_operand(0);
Window new_backward_conv_window = backward_conv->window();
PaddingConfig input_padding_config =
MakeNoPaddingConfig(input->shape().rank());
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
int64_t new_conv_padding = std::min(padding_low, padding_high);
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_low(
padding_low - new_conv_padding);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
padding_high - new_conv_padding);
auto* new_dim = new_backward_conv_window.mutable_dimensions(i);
new_dim->set_padding_low(new_conv_padding);
new_dim->set_padding_high(new_conv_padding);
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(1);
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(input->shape().element_type())));
HloInstruction* padded_input =
MakePadHlo(input, padding, input_padding_config).value();
HloInstruction* new_backward_conv =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
backward_conv->shape(), {padded_input, output}));
new_backward_conv->set_window(new_backward_conv_window);
VLOG(1) << "Canonicalizing backward filter conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_backward_conv->ToString();
TF_CHECK_OK(
computation->ReplaceInstruction(backward_conv, new_backward_conv));
return true;
}
bool GpuConvPaddingLegalization::CanonicalizeBackwardInputConvolution(
HloInstruction* backward_conv) {
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
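  // Symmetrize the window padding by growing the convolution's output shape,
  // then slice the enlarged result back down to the originally requested
  // shape.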
Window new_backward_conv_window = backward_conv->window();
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
Shape backward_conv_shape = backward_conv->shape().tuple_shapes(0);
Shape new_backward_conv_shape = backward_conv_shape;
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
if (padding_low > padding_high) {
IncreasePaddingLowBy(padding_high - padding_low,
new_backward_conv_window.mutable_dimensions(i));
} else if (padding_low < padding_high) {
IncreasePaddingHighBy(padding_low - padding_high,
new_backward_conv_window.mutable_dimensions(i));
}
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
new_backward_conv_shape.set_dimensions(
dim, new_backward_conv_shape.dimensions(dim) +
std::abs(padding_low - padding_high));
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(0);
HloInstruction* filter = backward_conv->mutable_operand(1);
HloInstruction* new_backward_conv_call =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
ShapeUtil::MakeTupleShape(
{new_backward_conv_shape, ShapeUtil::MakeShape(U8, {0})}),
{output, filter}));
new_backward_conv_call->set_window(new_backward_conv_window);
HloInstruction* new_backward_conv =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_shape, new_backward_conv_call, 0));
HloInstruction* new_backward_conv_scratch =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_call->shape().tuple_shapes(1),
new_backward_conv_call, 1));
std::vector<int64_t> start_indices(
new_backward_conv->shape().dimensions_size(), 0LL);
std::vector<int64_t> limit_indices(
new_backward_conv->shape().dimensions().begin(),
new_backward_conv->shape().dimensions().end());
std::vector<int64_t> strides(new_backward_conv->shape().dimensions_size(),
1LL);
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
if (padding_low > padding_high) {
start_indices[dim] += padding_low - padding_high;
} else if (padding_low < padding_high) {
limit_indices[dim] -= padding_high - padding_low;
}
}
Shape slice_shape =
ShapeInference::InferSliceShape(new_backward_conv->shape(), start_indices,
limit_indices, strides)
.value();
CHECK(ShapeUtil::Compatible(slice_shape, backward_conv_shape))
<< ShapeUtil::HumanString(slice_shape) << " vs "
<< ShapeUtil::HumanString(backward_conv_shape);
HloInstruction* slice = computation->AddInstruction(
HloInstruction::CreateSlice(backward_conv_shape, new_backward_conv,
start_indices, limit_indices, strides));
HloInstruction* new_tuple = computation->AddInstruction(
HloInstruction::CreateTuple({slice, new_backward_conv_scratch}));
VLOG(1) << "Canonicalizing backward input conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_tuple->ToString();
TF_CHECK_OK(computation->ReplaceInstruction(backward_conv, new_tuple));
return true;
}
absl::StatusOr<bool> GpuConvPaddingLegalization::RunOnComputation(
HloComputation* computation) {
bool changed = false;
std::vector<HloCustomCallInstruction*> convs;
for (auto* instr : computation->instructions()) {
if (IsCustomCallToDnnConvolution(*instr)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
for (HloCustomCallInstruction* instruction : convs) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instruction));
changed |= [&] {
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
return CanonicalizeForwardConvolution(instruction);
case CudnnConvKind::kBackwardInput:
return CanonicalizeBackwardInputConvolution(instruction);
case CudnnConvKind::kBackwardFilter:
return CanonicalizeBackwardFilterConvolution(instruction);
}
}();
}
return changed;
}
absl::StatusOr<bool> GpuConvPaddingLegalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/gpu_conv_padding_legalization.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using GpuConvPaddingLegalizationTest = HloTestBase;
TEST_F(GpuConvPaddingLegalizationTest, BackwardInputConvolve) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule convolution_module
ENTRY %convolution (operand f64[2,2,2,3]{3,2,1,0}) -> (f64[2,2,4,4]{3,2,1,0}, u8[0]) {
%operand = f64[2,2,2,3]{3,2,1,0} parameter(0)
%kernel = f64[2,3,2,3]{3,2,1,0} constant(
{
{
{
{ 0.29629629629629628, 0.30246913580246915, 0.30864197530864196 },
{ 0.31481481481481483, 0.32098765432098764, 0.3271604938271605 }
},
{
{ 0.25925925925925924, 0.26543209876543211, 0.27160493827160492 },
{ 0.27777777777777779, 0.2839506172839506, 0.29012345679012347 }
},
{
{ 0.22222222222222221, 0.22839506172839505, 0.23456790123456789 },
{ 0.24074074074074073, 0.24691358024691357, 0.25308641975308643 }
}
},
{
{
{ 0.18518518518518517, 0.19135802469135801, 0.19753086419753085 },
{ 0.20370370370370369, 0.20987654320987653, 0.21604938271604937 }
},
{
{ 0.14814814814814814, 0.15432098765432098, 0.16049382716049382 },
{ 0.16666666666666666, 0.1728395061728395, 0.17901234567901234 }
},
{
{ 0.1111111111111111, 0.11728395061728394, 0.12345679012345678 },
{ 0.12962962962962962, 0.13580246913580246, 0.1419753086419753 }
}
}
})
%reverse = f64[2,3,2,3]{3,2,1,0} reverse(%kernel), dimensions={0,1}
ROOT %custom-call = (f64[2,2,4,4]{3,2,1,0}, u8[0]{0}) custom-call(f64[2,2,2,3]{3,2,1,0} %operand, f64[2,3,2,3]{3,2,1,0} %reverse), window={size=2x3 stride=2x2 pad=0_0x0_1}, dim_labels=bf01_01io->b01f, custom_call_target="__cudnn$convBackwardInput", backend_config="{\"algorithm\":\"0\",\"tensor_ops_enabled\":false,\"conv_result_scale\":1,\"activation_mode\":\"0\",\"side_input_scale\":0}"
}
)")
.value();
ASSERT_TRUE(GpuConvPaddingLegalization().Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget},
m::Op(), m::Reverse(m::Constant())),
0)),
m::GetTupleElement())));
auto slice = root->operand(0);
Shape expected_slice_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 4});
EXPECT_TRUE(ShapeUtil::Equal(slice->shape(), expected_slice_shape));
auto conv = slice->operand(0);
Shape expected_conv_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 5});
EXPECT_TRUE(ShapeUtil::Equal(conv->shape(), expected_conv_shape));
}
}
}
} | 2,070 |
#ifndef XLA_SERVICE_GPU_HORIZONTAL_LOOP_FUSION_H_
#define XLA_SERVICE_GPU_HORIZONTAL_LOOP_FUSION_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class GpuHorizontalLoopFusion : public HloModulePass {
public:
GpuHorizontalLoopFusion() = default;
explicit GpuHorizontalLoopFusion(absl::string_view prefix)
: prefix_(prefix) {}
absl::string_view name() const override {
return "gpu_horizontal_loop_fusion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation*);
std::string prefix_;
};
}
}
#endif
#include "xla/service/gpu/horizontal_loop_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
PrimitiveType GetUniqueOutputTypeOfFusible(const HloInstruction& fusible) {
auto outputs = GetOutputsOfFusible(fusible);
CHECK(!outputs.empty());
PrimitiveType first_output_type = outputs[0]->shape().element_type();
for (size_t i = 1; i < outputs.size(); ++i) {
PrimitiveType cur_output_type = outputs[i]->shape().element_type();
CHECK(first_output_type == cur_output_type)
<< "Output types are expected to be unique, but see "
<< PrimitiveType_Name(first_output_type) << " and "
<< PrimitiveType_Name(cur_output_type);
}
return first_output_type;
}
class HorizontalLoopFusionImpl {
public:
explicit HorizontalLoopFusionImpl(HloComputation* computation,
absl::string_view prefix)
: computation_(computation), prefix_(prefix) {}
~HorizontalLoopFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
absl::Status Fuse(absl::Span<HloInstruction*> fused_fusion_instrs,
bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
absl::Status CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion);
absl::StatusOr<bool> FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
class FusionCandidates {
public:
explicit FusionCandidates(HloInstruction* consumer,
bool sliced_input_fusion)
: fusible_instrs_(),
pos_(0),
sliced_input_fusion_(sliced_input_fusion) {
Initialize(consumer);
}
absl::Span<HloInstruction*> GetNextSpanOfFusions();
private:
void Initialize(HloInstruction*);
std::vector<HloInstruction*> fusible_instrs_;
size_t pos_;
bool sliced_input_fusion_;
};
HloComputation* computation_;
std::string prefix_;
};
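// An instruction qualifies for horizontal fusion if it has no control
// dependencies, is not a nestable variadic reduction, and is either an
// elementwise op with operands or a loop fusion whose outputs all share one
// element type.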
bool IsFusibleCandidate(const HloInstruction& instr) {
if (!instr.control_successors().empty() ||
!instr.control_predecessors().empty()) {
return false;
}
if (IsNestableVariadicReduction(instr)) {
return false;
}
if (instr.IsElementwise() && instr.operand_count() > 0) {
return true;
}
if (!instr.IsLoopFusion()) {
return false;
}
auto outputs = GetOutputsOfFusible(instr);
CHECK(!outputs.empty());
const HloInstruction* first_output = outputs[0];
for (size_t i = 1; i < outputs.size(); ++i) {
if (first_output->shape().element_type() !=
outputs[i]->shape().element_type()) {
return false;
}
}
return true;
}
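// Heuristic profitability check: horizontal fusion targets small kernels, so
// reject candidates whose output element count or fused instruction count
// exceeds mode-dependent thresholds.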
bool IsProfitableFusionCandidate(const HloInstruction& instr,
bool sliced_input_fusion) {
const int64_t kShapeThreshold =
sliced_input_fusion ? 128 * 2048 : 8192 * 8192;
const int64_t kInstrCountThreshold = sliced_input_fusion ? 30 : 128;
const HloInstruction* root = (instr.opcode() == HloOpcode::kFusion)
? instr.fused_expression_root()
: &instr;
if (root->opcode() == HloOpcode::kTuple) {
Shape shape = root->operand(0)->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profitable check failed due to element count with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
} else {
Shape shape = root->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profiltable check failed due to element size with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
}
if (instr.opcode() == HloOpcode::kFusion &&
instr.fused_instruction_count() > kInstrCountThreshold) {
return false;
}
return true;
}
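// Returns true if every dense array inside the (possibly fused) instruction
// has a row-major (dim-0 major) layout.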
bool HasOnlyRowMajorLayout(const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return LayoutUtil::IsMonotonicWithDim0Major(instr.shape().layout());
}
auto fused_instrs = instr.fused_instructions_computation()->instructions();
for (HloInstruction* i : fused_instrs) {
if (!LayoutUtil::IsDenseArray(i->shape())) {
continue;
}
if (!LayoutUtil::IsMonotonicWithDim0Major(i->shape().layout())) {
return false;
}
}
return true;
}
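// Returns true if any operand of `instr` is a parameter instruction that is
// also consumed by another fusion candidate in `fusion_instrs`.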
bool AnyOpndIsParamSharedAmongFusions(
const HloInstruction* instr,
const absl::flat_hash_set<HloInstruction*>& fusion_instrs) {
return absl::c_any_of(instr->operands(), [&](const HloInstruction* opnd) {
return opnd->opcode() == HloOpcode::kParameter &&
absl::c_any_of(opnd->users(), [&](const HloInstruction* user) {
return user != instr && fusion_instrs.contains(user);
});
});
}
void HorizontalLoopFusionImpl::FusionCandidates::Initialize(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusible_candidates;
std::vector<HloInstruction*> ordered_fusible_candidates;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (IsFusibleCandidate(*predecessor)) {
if (fusible_candidates.insert(predecessor).second) {
ordered_fusible_candidates.push_back(predecessor);
}
}
}
for (HloInstruction* instr : ordered_fusible_candidates) {
if (!IsConsumerTheOnlyNonRootUser(*instr, *consumer)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects maybe illegal instr " << instr->ToString()
<< "; including it may create cycles in HLO.";
continue;
} else if (!IsProfitableFusionCandidate(*instr, sliced_input_fusion_)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects may-not-be profitable fusion instr"
<< instr->ToString();
continue;
} else if (!HasOnlyRowMajorLayout(*instr)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects non-row-major fusion instr " << instr->ToString();
continue;
} else if (AnyOpndIsParamSharedAmongFusions(instr, fusible_candidates)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects the fusion instr because it shares parameter with"
<< " other fusion candidates, instr: " << instr->ToString();
continue;
} else {
VLOG(2) << "Find a fusion candidate " << instr->ToString();
fusible_instrs_.push_back(instr);
}
}
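  // Sort so that candidates with the same output element type, output count,
  // and instruction count become adjacent; GetNextSpanOfFusions() only
  // batches consecutive compatible candidates.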
std::stable_sort(
fusible_instrs_.begin(), fusible_instrs_.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
if (GetUniqueOutputTypeOfFusible(*a) !=
GetUniqueOutputTypeOfFusible(*b)) {
return GetUniqueOutputTypeOfFusible(*a) <
GetUniqueOutputTypeOfFusible(*b);
} else if (GetOutputSizeOfFusible(*a) != GetOutputSizeOfFusible(*b)) {
return GetOutputSizeOfFusible(*a) < GetOutputSizeOfFusible(*b);
} else if (GetInstrCountOfFusible(*a) != GetInstrCountOfFusible(*b)) {
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
} else {
return ShapeUtil::ElementsIn(GetOutputsOfFusible(*a)[0]->shape()) <
ShapeUtil::ElementsIn(GetOutputsOfFusible(*b)[0]->shape());
}
});
}
absl::Span<HloInstruction*>
HorizontalLoopFusionImpl::FusionCandidates::GetNextSpanOfFusions() {
if (pos_ >= fusible_instrs_.size()) {
return absl::Span<HloInstruction*>();
}
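  // Cap the batch size: existing fusions and sliced-input fusion use smaller
  // batches (32) than plain elementwise instructions (64).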
const auto kMaxFusionBatchSize = [&]() -> int64_t {
if (sliced_input_fusion_) {
return 32;
} else {
if (fusible_instrs_[pos_]->opcode() == HloOpcode::kFusion) {
return 32;
} else {
return 64;
}
}
}();
size_t left = pos_;
size_t right = pos_ + 1;
size_t first_output_size = GetOutputSizeOfFusible(*fusible_instrs_[left]);
PrimitiveType first_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[left]);
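  // CUDA limits the size of a kernel's parameter list, so bound the number of
  // accumulated operand/output pointers (8 bytes each) per fused kernel.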
constexpr int64_t kMaxCudaParamSize = 4000;
size_t accum_io_size = 0;
size_t accum_num_outputs = 0;
for (; right < fusible_instrs_.size(); ++right) {
PrimitiveType cur_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[right]);
if (first_output_type != cur_output_type) {
break;
}
if (first_output_size != GetOutputSizeOfFusible(*fusible_instrs_[right])) {
break;
}
if (GetInstrCountOfFusible(*fusible_instrs_[left]) !=
GetInstrCountOfFusible(*fusible_instrs_[right])) {
break;
}
if (!sliced_input_fusion_ &&
!ShapeUtil::EqualIgnoringElementType(
GetOutputsOfFusible(*fusible_instrs_[left])[0]->shape(),
GetOutputsOfFusible(*fusible_instrs_[right])[0]->shape())) {
break;
}
size_t num_outputs = GetOutputSizeOfFusible(*fusible_instrs_[right]);
accum_num_outputs += num_outputs;
if (accum_num_outputs >= kMaxFusionBatchSize) {
break;
}
accum_io_size += fusible_instrs_.at(right)->operand_count() + num_outputs;
if (accum_io_size * 8 >= kMaxCudaParamSize) {
break;
}
}
VLOG(2) << "horizontal fuse get instruction span with " << (right - left)
<< " instructions for sliced_input_fusion=" << sliced_input_fusion_
<< " fusion";
pos_ = right;
return absl::MakeSpan(fusible_instrs_).subspan(left, right - left);
}
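// Repeatedly takes the next span of compatible candidates feeding `consumer`
// and horizontally fuses each one; spans of size one are skipped.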
absl::StatusOr<bool> HorizontalLoopFusionImpl::FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
bool changed = false;
FusionCandidates loop_fusion_candidates(consumer, sliced_input_fusion);
while (true) {
auto fusibles = loop_fusion_candidates.GetNextSpanOfFusions();
if (fusibles.empty()) {
break;
} else if (fusibles.size() == 1) {
continue;
}
changed = true;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* instr : fusibles) {
if (instr->opcode() == HloOpcode::kFusion) {
fusion_instrs.push_back(instr);
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(instr, HloInstruction::FusionKind::kLoop));
fusion_instrs.push_back(fusion_instr);
}
}
TF_RETURN_IF_ERROR(Fuse(absl::MakeSpan(fusion_instrs), sliced_input_fusion,
to_fuse_candidates));
}
return changed;
}
absl::Status HorizontalLoopFusionImpl::CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion) {
HloComputation::Builder b(prefix_ + "horizontally_fused_computation");
size_t fused_comp_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* bound_opnd = fused_fusion_instrs[i]->mutable_operand(j);
b.AddInstruction(HloInstruction::CreateParameter(
fused_comp_param_id++, bound_opnd->shape(),
absl::StrCat("param_", i, "_", j)));
bound_operands->push_back(bound_opnd);
}
}
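  // Build the computation with a placeholder tuple root; the real root is
  // installed once all instructions have been cloned in.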
HloInstruction* dummy_root = b.AddInstruction(
HloInstruction::CreateTuple(std::vector<HloInstruction*>{}));
*uniq_computation = b.Build(dummy_root);
HloComputation* comp = uniq_computation->get();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> clone_map;
size_t new_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* old_param = old_params[j];
HloInstruction* new_param = comp->parameter_instruction(new_param_id++);
clone_map.insert({old_param, new_param});
}
}
const OpMetadata* metadata = nullptr;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto def_to_use_order = fused_fusion_instrs[i]
->fused_instructions_computation()
->MakeInstructionPostOrder();
for (HloInstruction* old_instr : def_to_use_order) {
if (old_instr->opcode() == HloOpcode::kParameter ||
(sliced_input_fusion && old_instr->opcode() == HloOpcode::kTuple &&
old_instr == fused_fusion_instrs[i]->fused_expression_root())) {
continue;
}
std::vector<HloInstruction*> new_opnds;
const auto& old_opnds = old_instr->operands();
new_opnds.reserve(old_opnds.size());
for (HloInstruction* old_opnd : old_opnds) {
CHECK(clone_map.find(old_opnd) != clone_map.end());
new_opnds.push_back(clone_map[old_opnd]);
}
HloInstruction* new_instr = comp->AddInstruction(
old_instr->CloneWithNewOperands(old_instr->shape(), new_opnds));
clone_map.insert({old_instr, new_instr});
metadata = &old_instr->metadata();
}
}
size_t fused_instr_output_size =
GetOutputSizeOfFusible(*fused_fusion_instrs[0]);
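  // Sliced-input fusion: reshape each fused instruction's i-th output to 1D,
  // concatenate across fusions, then slice the concatenation back apart.
  // Otherwise the cloned outputs are simply tupled together.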
if (sliced_input_fusion) {
std::vector<HloInstruction*> concated_outputs;
for (size_t i = 0; i < fused_instr_output_size; ++i) {
std::vector<HloInstruction*> instr_outputs(fused_fusion_instrs.size());
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
if (new_output->shape().dimensions_size() == 1) {
instr_outputs[j] = new_output;
} else {
Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout(
new_output->shape().element_type(),
{ShapeUtil::ElementsIn(new_output->shape())},
std::vector<int64_t>(1, 0));
TF_ASSIGN_OR_RETURN(instr_outputs[j],
MakeReshapeHlo(new_shape, new_output));
}
}
TF_ASSIGN_OR_RETURN(HloInstruction * concated_output,
MakeConcatHlo(instr_outputs, 0));
concated_outputs.push_back(concated_output);
}
std::vector<HloInstruction*> output_slices(concated_outputs.size() *
fused_fusion_instrs.size());
for (size_t i = 0; i < concated_outputs.size(); ++i) {
HloInstruction* concated_output = concated_outputs[i];
int64_t slice_start = 0;
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
Shape shape = old_output->shape();
int64_t slice_limit = slice_start + ShapeUtil::ElementsIn(shape);
TF_ASSIGN_OR_RETURN(
output_slices[concated_outputs.size() * j + i],
MakeSliceHlo(concated_output, {slice_start}, {slice_limit},
{1}));
slice_start = slice_limit;
}
}
HloInstruction* tuple = comp->AddInstruction(
HloInstruction::CreateTuple(output_slices), metadata);
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
} else {
std::vector<HloInstruction*> tuple_operands(fused_instr_output_size *
fused_fusion_instrs.size());
for (size_t i = 0; i < fused_instr_output_size; ++i) {
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
tuple_operands[fused_instr_output_size * j + i] = new_output;
}
}
HloInstruction* tuple =
comp->AddInstruction(HloInstruction::CreateTuple(tuple_operands));
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
}
return absl::OkStatus();
}
absl::Status HorizontalLoopFusionImpl::Fuse(
absl::Span<HloInstruction*> fused_fusion_instrs, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
std::unique_ptr<HloComputation> uniq_computation;
std::vector<HloInstruction*> bound_operands;
TF_RETURN_IF_ERROR(CreateFusedComputation(fused_fusion_instrs,
&uniq_computation, &bound_operands,
sliced_input_fusion));
HloComputation* fused_comp = computation_->parent()->AddEmbeddedComputation(
std::move(uniq_computation));
HloInstruction* hori_fusion_instr = computation_->AddInstruction(
HloInstruction::CreateFusion(fused_comp->root_instruction()->shape(),
sliced_input_fusion
? HloInstruction::FusionKind::kInput
: HloInstruction::FusionKind::kLoop,
bound_operands, fused_comp, prefix_),
&fused_comp->root_instruction()->metadata());
fused_comp->SetFusionInstruction(hori_fusion_instr);
to_fuse_candidates.push_back(hori_fusion_instr);
size_t total_output_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
std::vector<HloInstruction*> bitcasts_or_gte;
HloInstruction* fused_instr = fused_fusion_instrs[i];
size_t num_out | #include "xla/service/gpu/horizontal_loop_fusion.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/instruction_fusion.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalLoopFusionTest : public HloTestBase {
public:
static bool IsFusion(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kFusion;
}
};
TEST_F(HorizontalLoopFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Slice(m::Concatenate()),
m::Slice(m::Concatenate()))));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForCycle) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForCycle
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
add.2 = f16[123]{0} add(fusion.1, arg.4)
fusion.2 = f16[123]{0}
fusion(add.2, arg.3), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2, add.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForIncompatibleTypes) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForIncompatibleTypes
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = s32[123]{0} parameter(0)
arg.2 = s32[123]{0} parameter(1)
ROOT add.1 = s32[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = s32[123]{0} parameter(2)
arg.4 = s32[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = s32[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, s32[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, FusingIntoKLoopAndKInputTogether) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule FusingIntoKLoopAndKInputTogether
fused_computation.1 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.3 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.4 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.5 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
fused_computation.6 {
arg.1 = f16[128]{0} parameter(0)
arg.2 = f16[128]{0} parameter(1)
ROOT add.1 = f16[128]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
arg.3 = f16[129, 2048]{1, 0} parameter(2)
arg.4 = f16[129, 2048]{1, 0} parameter(3)
arg.5 = f16[130, 2048]{1, 0} parameter(4)
arg.6 = f16[130, 2048]{1, 0} parameter(5)
arg.7 = f16[130, 2048]{1, 0} parameter(6)
arg.8 = f16[130, 2048]{1, 0} parameter(7)
arg.9 = f16[123]{0} parameter(8)
arg.10 = f16[123]{0} parameter(9)
arg.11 = f16[128]{0} parameter(10)
arg.12 = f16[128]{0} parameter(11)
fusion.1 = f16[129,2048]{1, 0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[129,2048]{1, 0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
fusion.3 = f16[130,2048]{1, 0}
fusion(arg.5, arg.6), kind=kLoop, calls=fused_computation.3
fusion.4 = f16[130,2048]{1, 0}
fusion(arg.7, arg.8), kind=kLoop, calls=fused_computation.4
fusion.5 = f16[123]{0}
fusion(arg.9, arg.10), kind=kLoop, calls=fused_computation.5
fusion.6 = f16[128]{0}
fusion(arg.11, arg.12), kind=kLoop, calls=fused_computation.6
ROOT tuple.1 = (f16[129,2048]{1, 0}, f16[129,2048]{1, 0},
f16[130,2048]{1, 0}, f16[130,2048]{1, 0},
f16[123]{0}, f16[128]{0})
tuple(fusion.1, fusion.2, fusion.3, fusion.4, fusion.5, fusion.6)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
int input_fusion_count = 0;
int loop_fusion_count = 0;
for (auto inst : module->entry_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kFusion) {
input_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kInput) ? 1 : 0;
loop_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kLoop) ? 1 : 0;
}
}
EXPECT_EQ(input_fusion_count, 1);
EXPECT_EQ(loop_fusion_count, 2);
}
TEST_F(HorizontalLoopFusionTest, HorizontalLoopFusionAfterVerticalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
ENTRY MergeSharedFusionInstruction.Computation0 {
param.1.1 = f32[4,1024]{1,0} parameter(0)
param.1.2 = f32[4,1024]{1,0} parameter(1)
param.1.3 = f32[4,1024]{1,0} parameter(2)
param.2.1 = f32[321,5]{1,0} parameter(3)
param.2.2 = f32[321,5]{1,0} parameter(4)
param.2.3 = f32[321,5]{1,0} parameter(5)
const.1 = f32[] constant(3)
const.2 = f32[] constant(3)
broadcast.1 = f32[4,1024]{1,0} broadcast(const.1), dimensions={}
broadcast.2 = f32[321,5]{1,0} broadcast(const.2), dimensions={}
mul.1.1 = f32[4,1024]{1,0} multiply(param.1.1, param.1.2)
mul.1.2 = f32[4,1024]{1,0} multiply(param.1.3, broadcast.1)
add.1 = f32[4,1024]{1,0} add(mul.1.1, mul.1.2)
mul.2.1 = f32[321,5]{1,0} multiply(param.2.1, param.2.2)
mul.2.2 = f32[321,5]{1,0} multiply(param.2.3, broadcast.2)
add.2 = f32[321,5]{1,0} add(mul.2.1, mul.2.2)
ROOT tuple = (f32[4,1024]{1,0}, f32[321,5]{1,0}) tuple(add.1, add.2)
})")
.value();
HloPassPipeline fusion("fusion");
const se::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
  fusion.AddPass<xla::gpu::GpuInstructionFusion>(false, device_info);
  fusion.AddPass<xla::gpu::GpuInstructionFusion>(true, device_info);
EXPECT_TRUE(fusion.Run(module.get()).value());
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_instr = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(
m::Bitcast(m::GetTupleElement(m::Fusion(&fusion_instr))),
m::Bitcast(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion_instr->IsMultiOutputFusion());
EXPECT_THAT(fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())),
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())))));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, GradientDescentOptimizerLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
for (int64_t i = 0; i < 128; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {i + 1, 1024});
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 0, shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 3 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
HloInstruction* param_delta = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 2, shape, "delta"));
HloInstruction* alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, param_alpha, {}));
HloInstruction* alpha_delta =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, alpha_broadcasted, param_delta));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, param_var_in, alpha_delta));
var_outs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, FusingDifferentOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule HeterogeneousMultiOutputFusions
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
mul.2 = f16[1024]{0} multiply(arg.3, arg.4)
add.1 = f16[1024]{0} add(mul.1, mul.2)
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}) tuple(add.1, mul.1)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
add.1 = f16[123]{0} add(arg.1, arg.2)
add.2 = f16[123]{0} add(arg.3, arg.4)
mul.1 = f16[123]{0} multiply(add.1, add.2)
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}) tuple(mul.1, add.1)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
arg.5 = f16[123]{0} parameter(4)
arg.6 = f16[123]{0} parameter(5)
arg.7 = f16[123]{0} parameter(6)
arg.8 = f16[123]{0} parameter(7)
fusion.1 = (f16[1024]{0}, f16[1024]{0})
fusion(arg.1, arg.2, arg.3, arg.4),
kind=kLoop, calls=fused_computation.1
fusion.2 = (f16[123]{0}, f16[123]{0})
fusion(arg.5, arg.6, arg.7, arg.8),
kind=kLoop, calls=fused_computation.2
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=0
gte.2 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.3 = f16[123]{0} get-tuple-element(fusion.2), index=0
gte.4 = f16[123]{0} get-tuple-element(fusion.2), index=1
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}, f16[123]{0}, f16[123]{0})
tuple(gte.1, gte.2, gte.3, gte.4)
}
)")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, RMSPropLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> all_outputs;
for (int64_t i = 0; i < 48; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 1024 + i});
HloInstruction* grad = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 0, shape, "grad"));
HloInstruction* ms = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 1, shape, "ms"));
HloInstruction* rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 2, ShapeUtil::MakeShape(F32, {}), "rho"));
HloInstruction* one_minus_rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 3, ShapeUtil::MakeShape(F32, {}), "one_minus_rho"));
HloInstruction* rho_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, rho, {}));
    HloInstruction* one_minus_rho_broadcasted = builder.AddInstruction(
        HloInstruction::CreateBroadcast(shape, one_minus_rho, {}));
    HloInstruction* grad_squared = builder.AddInstruction(
        HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad, grad));
    HloInstruction* ms_1st_term = builder.AddInstruction(
        HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad_squared,
                                     one_minus_rho_broadcasted));
HloInstruction* ms_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, ms, rho_broadcasted));
HloInstruction* ms_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_1st_term, ms_2nd_term));
HloInstruction* momentum = builder.AddInstruction(
        HloInstruction::CreateParameter(i * 9 + 4, shape, "momentum"));
HloInstruction* mom = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 5, shape, "mom"));
HloInstruction* lr = builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 6, ShapeUtil::MakeShape(F32, {}), "lr"));
HloInstruction* epsilon =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 7, ShapeUtil::MakeShape(F32, {}), "epsilon"));
HloInstruction* lr_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, lr, {}));
HloInstruction* epsilon_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, epsilon, {}));
HloInstruction* mom_1st_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, momentum, mom));
HloInstruction* ms_eps =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_out, epsilon_broadcasted));
HloInstruction* ms_eps_rsq = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kRsqrt, ms_eps));
HloInstruction* grad_ms_eps_rsq =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, grad, ms_eps_rsq));
HloInstruction* mom_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, lr_broadcasted, grad_ms_eps_rsq));
HloInstruction* mom_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, mom_1st_term, mom_2nd_term));
HloInstruction* var = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 8, shape, "var"));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, var, mom_out));
all_outputs.push_back(ms_out);
all_outputs.push_back(mom_out);
all_outputs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(all_outputs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1.0e-5, 1.0e-5}));
}
TEST_F(HorizontalLoopFusionTest, DynamicUpdateSlice) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForDynamicUpdateSlice
fusion.1 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
fusion.2 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
ENTRY entry {
p.00 = f16[5,9,10]{2,1,0} parameter(0)
p.01 = f16[5,9,10]{2,1,0} parameter(1)
p.10 = s32[] parameter(2)
p.11 = s32[] parameter(3)
p.20 = f16[1,9,10]{2,1,0} parameter(4)
p.21 = f16[1,9,10]{2,1,0} parameter(5)
f1 = f16[5,9,10] fusion(p.00, p.10, p.20), kind=kLoop, calls=fusion.1
f2 = f16[5,9,10] fusion(p.01, p.11, p.21), kind=kLoop, calls=fusion.2
ROOT tuple = (f16[5,9,10],f16[5,9,10]) tuple(f1, f2)
})")
.value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForSharedParam) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.2), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, IterativeHorizontalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
fused_computation.0 {
arg.0 = f16[] parameter(0)
arg.1 = f16[123]{0} parameter(1)
broadcast.0 = f16[123]{0} broadcast(arg.0), dimensions={}
ROOT mul.1 = f16[123]{0} multiply(broadcast.0, arg.1)
}
fused_computation.1 {
arg.0 = f16[] parameter(0)
arg.1 = f16[456]{0} parameter(1)
broadcast.0 = f16[456]{0} broadcast(arg.0), dimensions={}
ROOT add.1 = f16[456]{0} add(broadcast.0, arg.1)
}
ENTRY entry_computation {
arg.0 = f16[] parameter(0)
arg.1 = f16[] parameter(1)
arg.2 = f16[123]{0} parameter(2)
arg.3 = f16[456]{0} parameter(3)
sqrt.0 = f16[] sqrt(arg.0)
sqrt.1 = f16[] sqrt(arg.1)
fusion.0 = f16[123]{0}
fusion(sqrt.0, arg.2), kind=kLoop, calls=fused_computation.0
fusion.1 = f16[456]{0}
fusion(sqrt.1, arg.3), kind=kLoop, calls=fused_computation.1
ROOT tuple.1 = (f16[123]{0}, f16[456]{0}) tuple(fusion.0, fusion.1)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<GpuHorizontalLoopFusion>();
iterative_h_fusion.AddPass<HloDCE>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
EXPECT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, TraversalOrder) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule cluster
%fused_computation (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
%fused_computation.1 (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
ENTRY %entry_computation (arg0: f32[256,256], arg1: f32[256,256], arg2: f32[],
arg3: f32[], arg4: f32[], arg5: f32[])
-> (f32[256,256], f32[256,256]) {
%arg0 = f32[256,256]{1,0} parameter(0), parameter_replication={false}
%arg1 = f32[256,256]{1,0} parameter(1), parameter_replication={false}
%arg2 = f32[] parameter(2), parameter_replication={false}
%arg3 = f32[] parameter(3), parameter_replication={false}
%arg4 = f32[] parameter(4), parameter_replication={false}
%arg5 = f32[] parameter(5), parameter_replication={false}
%sqrt = f32[] sqrt(f32[] %arg2)
%sqrt.1 = f32[] sqrt(f32[] %arg3)
%fusion = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg0, f32[] %sqrt, f32[] %sqrt.1),
kind=kLoop, calls=%fused_computation
%sqrt.2 = f32[] sqrt(f32[] %arg4)
%sqrt.3 = f32[] sqrt(f32[] %arg5)
%fusion.1 = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg1, f32[] %sqrt.2, f32[] %sqrt.3),
kind=kLoop, calls=%fused_computation.1
ROOT %tuple.163 = (f32[256,256]{1,0}, f32[256,256]{1,0})
tuple(f32[256,256]{1,0} %fusion.1, f32[256,256]{1,0} %fusion)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<GpuHorizontalLoopFusion>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, NoBufferAliasingOfDuplicateParameter) {
const char* hlo_text = R"(
HloModule m
branch_a {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
branch_b {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
ENTRY e {
p0 = s32[] parameter(0)
c0 = s32[] constant(0)
cond = (s32[4096], s32[4096]) conditional(p0, c0, c0), branch_computations={branch_a, branch_b}
p1 = s32[4096] parameter(1)
gte0 = s32[4096] get-tuple-element(cond), index=0
gte1 = s32[4096] get-tuple-element(cond), index=1
a0 = s32[4096] add(gte1, gte0)
m0 = s32[4096] multiply(gte1, gte0)
ROOT r = (s32[4096], s32[4096]) tuple(m0, a0)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, std::nullopt));
}
TEST_F(HorizontalLoopFusionTest, CopyInsertionFusionControlFlow) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
cp1 = f32[1]{0} copy(cst)
cp2 = f32[1]{0} copy(cst)
cp3 = f32[1]{0} copy(cst)
cp4 = f32[1]{0} copy(cst), control-predecessors={cp1}
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cp1, cp2, cp3, cp4)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_text).value();
EXPECT_TRUE(GpuHorizontalLoopFusion().Run(module.get()).value());
VLOG(2) << module->ToString();
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
1);
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
EXPECT_THAT(entry_root,
GmockMatch(m::Tuple(m::Copy(), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::Copy())));
}
TEST_F(HorizontalLoopFusionTest, DoNotMergeVariadicReductions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_3 = f32[554112,10]{1,0} parameter(1)
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_3), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})")
.value();
EXPECT_FALSE(GpuHorizontalLoopFusion().Run(module.get()).value());
}
}
}
} | 2,071 |
#ifndef XLA_SERVICE_GPU_TRITON_FUSION_NUMERICS_VERIFIER_H_
#define XLA_SERVICE_GPU_TRITON_FUSION_NUMERICS_VERIFIER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/stream_executor/stream.h"
namespace xla::gpu {
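// Verifies the numerics of Triton fusions. Each Triton fusion in the module
// is extracted into its own module, compiled, and run twice: once through
// Triton and once through the regular emitters (by clearing the fusion's
// backend config). The two results are then compared on-device, and an error
// is returned if they disagree.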
class TritonFusionNumericsVerifier : public HloModulePass {
public:
explicit TritonFusionNumericsVerifier(const AutotuneConfig& config)
: config_(config) {}
static absl::string_view Name() { return "triton-numerics-verifier"; }
absl::string_view name() const override { return Name(); }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
AutotuneConfig config_;
};
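// Usage sketch (illustrative only; the pipeline name is made up, and the
// AutotuneConfig construction mirrors the one used in the tests below):
//
//   AutotuneConfig config{DeviceConfig{stream_exec, /*allocator=*/nullptr},
//                         module->config().debug_options()};
//   HloPassPipeline pipeline("verify-triton-numerics");
//   pipeline.AddPass<TritonFusionNumericsVerifier>(config);
//   TF_ASSIGN_OR_RETURN(bool changed, pipeline.Run(module.get()));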
namespace triton_fusion_numerics_pass_internal {
absl::StatusOr<ScopedShapedBuffer> CompileAndRunFusion(
AutotunerCompileUtil& util, const HloFusionInstruction& fusion,
const AutotuneConfig& config, const DebugOptions& debug_opts,
bool clear_backend_config);
absl::Status CompareBuffers(const ScopedShapedBuffer& current,
const ScopedShapedBuffer& expected,
const Shape& shape, const HloModuleConfig& config,
se::Stream* stream);
absl::Status ForAllTritonFusions(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::AnyInvocable<absl::Status(const HloFusionInstruction&)> fn);
}
}
#endif
#include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include <memory>
#include <optional>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/stream.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput;
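// Returns `hlo` as an HloFusionInstruction if it is a fusion whose backend
// config identifies it as a Triton fusion, and nullptr otherwise.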
absl::StatusOr<const HloFusionInstruction*> AsTritonFusion(
const HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kFusion) {
return nullptr;
}
const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo);
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
if (backend_config.kind() == kTritonFusionKind) {
return fusion;
}
return nullptr;
}
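// Extracts `fusion` into a standalone module with the given debug options.
// When `clear_backend_config` is set, the root's backend config is dropped so
// that the extracted fusion compiles through the regular emitters rather than
// Triton; this produces the reference result for the comparison.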
std::unique_ptr<HloModule> NewHloModuleFromFusion(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts,
bool clear_backend_config) {
std::unique_ptr<HloModule> new_module =
ExtractInstructionIntoNewModule(fusion);
if (clear_backend_config) {
new_module->entry_computation()->root_instruction()->clear_backend_config();
}
new_module->mutable_config().set_debug_options(debug_opts);
return new_module;
}
}
namespace triton_fusion_numerics_pass_internal {
absl::StatusOr<ScopedShapedBuffer> CompileAndRunFusion(
AutotunerCompileUtil& util, const HloFusionInstruction& fusion,
const AutotuneConfig& config, const DebugOptions& debug_opts,
bool clear_backend_config) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable,
util.Compile([&](const DebugOptions& opts) {
return NewHloModuleFromFusion(fusion, opts,
clear_backend_config);
}));
TF_ASSIGN_OR_RETURN(auto rz_buffers, RedzoneBuffers::FromInstruction(
fusion, config, debug_opts,
RedzoneBuffers::kAllInputs));
TF_ASSIGN_OR_RETURN(auto stream, config.GetStream());
TF_ASSIGN_OR_RETURN(std::optional<ProfilingOutput> profiling_output,
util.ProfileExecutable(executable.get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
if (!profiling_output.has_value()) {
return Internal("No output after a successful verification run.");
}
return std::move(profiling_output->output);
}
absl::Status CompareBuffers(const ScopedShapedBuffer& current,
const ScopedShapedBuffer& expected,
const Shape& shape, const HloModuleConfig& config,
se::Stream* stream) {
BufferComparator comparator(shape, config);
TF_ASSIGN_OR_RETURN(bool outputs_match,
comparator.CompareEqual(stream, current.root_buffer(),
expected.root_buffer()));
if (!outputs_match) {
return Internal("Triton fusion output does not match emitters output.");
}
return absl::OkStatus();
}
absl::Status ForAllTritonFusions(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::AnyInvocable<absl::Status(const HloFusionInstruction&)> fn) {
for (HloComputation* computation :
module.MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
TF_ASSIGN_OR_RETURN(auto triton_fusion, AsTritonFusion(instruction));
if (triton_fusion != nullptr) {
TF_RETURN_IF_ERROR(fn(*triton_fusion));
}
}
}
return absl::OkStatus();
}
}
namespace {
absl::Status VerifyTritonFusion(AutotunerCompileUtil& util,
const HloFusionInstruction& fusion,
const AutotuneConfig& config,
const DebugOptions& debug_opts) {
TF_ASSIGN_OR_RETURN(auto triton_result,
triton_fusion_numerics_pass_internal::CompileAndRunFusion(
util, fusion, config, debug_opts,
false));
TF_ASSIGN_OR_RETURN(auto emitters_result,
triton_fusion_numerics_pass_internal::CompileAndRunFusion(
util, fusion, config, debug_opts,
true));
TF_ASSIGN_OR_RETURN(auto stream, config.GetStream());
return triton_fusion_numerics_pass_internal::CompareBuffers(
triton_result, emitters_result, fusion.shape(),
fusion.GetModule()->config(), stream);
}
}
absl::StatusOr<bool> TritonFusionNumericsVerifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (config_.IsDeviceless()) {
return absl::InternalError(
"Cannot run TritonFusionNumericsVerifier on a deviceless compilation.");
}
const DebugOptions& debug_options = module->config().debug_options();
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> opt_compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(opt_compile_util.has_value());
TF_RETURN_IF_ERROR(triton_fusion_numerics_pass_internal::ForAllTritonFusions(
*module, execution_threads, [&](const HloFusionInstruction& fusion) {
return VerifyTritonFusion(*opt_compile_util, fusion, config_,
debug_options);
}));
return false;
}
} | #include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla::gpu {
namespace {
class TritonFusionNumericsVerifierTest
: public HloTestBase,
public ::testing::WithParamInterface<PrimitiveType> {
public:
DebugOptions GetDebugOptionsForTest() override {
auto options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_enable_triton_softmax_fusion(true);
options.set_xla_gpu_verify_triton_fusion_numerics(true);
return options;
}
protected:
std::unique_ptr<xla::HloModule> Module(absl::string_view hlo_text_template,
absl::string_view type) {
auto m = GetOptimizedModule(absl::Substitute(hlo_text_template, type));
TF_EXPECT_OK(m);
return std::move(m.value());
}
const HloFusionInstruction* TritonFusion(const xla::HloModule& module) {
const HloFusionInstruction* fusion_result = nullptr;
absl::Status res =
triton_fusion_numerics_pass_internal::ForAllTritonFusions(
module, {},
[&](const HloFusionInstruction& fusion) -> absl::Status {
EXPECT_EQ(fusion_result, nullptr);
fusion_result = &fusion;
return absl::OkStatus();
});
return fusion_result;
}
AutotuneConfig CreateAutotuneConfig() {
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
auto executors_or = PlatformUtil::GetStreamExecutors(platform);
TF_EXPECT_OK(executors_or);
return AutotuneConfig{DeviceConfig{executors_or->at(0), nullptr},
GetDebugOptionsForTest()};
}
AutotunerCompileUtil CreateAutotunerCompileUtil(AutotuneConfig& config) {
auto opt_compile_util_or =
AutotunerCompileUtil::Create(config, GetDebugOptionsForTest());
TF_EXPECT_OK(opt_compile_util_or);
EXPECT_TRUE(opt_compile_util_or->has_value());
return std::move(opt_compile_util_or->value());
}
};
constexpr absl::string_view kSoftmaxHlo = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
bool HloPassHasRun(const HloModule& module, absl::string_view pass_name) {
for (const auto& pass_metadata : module.metadata().proto().pass_metadata()) {
if (pass_metadata.pass_name() == pass_name) {
return true;
}
}
return false;
}
TEST_P(TritonFusionNumericsVerifierTest, VerifyExactSoftmaxFusionNumerics) {
PrimitiveType data_type = GetParam();
auto module = Module(kSoftmaxHlo,
primitive_util::LowercasePrimitiveTypeName(data_type));
EXPECT_TRUE(HloPassHasRun(*module, TritonFusionNumericsVerifier::Name()));
auto fusion = TritonFusion(*module);
EXPECT_NE(fusion, nullptr);
}
TEST_F(TritonFusionNumericsVerifierTest, CheckMismatch) {
auto module_f16 = Module(kSoftmaxHlo, "f16");
auto fusion_f16 = TritonFusion(*module_f16);
EXPECT_NE(fusion_f16, nullptr);
auto module_f32 = Module(kSoftmaxHlo, "f32");
auto fusion_f32 = TritonFusion(*module_f32);
EXPECT_NE(fusion_f32, nullptr);
AutotuneConfig autotune_config = CreateAutotuneConfig();
AutotunerCompileUtil compile_util =
CreateAutotunerCompileUtil(autotune_config);
const DebugOptions& debug_options = GetDebugOptionsForTest();
auto f16_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion(
compile_util, *fusion_f16, autotune_config, debug_options,
false);
TF_EXPECT_OK(f16_result);
auto f32_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion(
compile_util, *fusion_f32, autotune_config, debug_options,
false);
TF_EXPECT_OK(f32_result);
auto stream = autotune_config.GetStream();
TF_EXPECT_OK(stream);
auto cmp = triton_fusion_numerics_pass_internal::CompareBuffers(
*f16_result, *f32_result, fusion_f16->shape(),
fusion_f16->GetModule()->config(), *stream);
EXPECT_FALSE(cmp.ok());
}
INSTANTIATE_TEST_SUITE_P(TritonFusionNumericsVerifierTestSuite,
TritonFusionNumericsVerifierTest,
::testing::Values(F32, F16, BF16));
}
} | 2,072 |
#ifndef XLA_SERVICE_GPU_CUBLAS_PAD_FOR_GEMMS_H_
#define XLA_SERVICE_GPU_CUBLAS_PAD_FOR_GEMMS_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
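// Pads the two non-batch dimensions of dot operands (and of the dot result)
// of type `datatype` up to the next multiple of `pad_to_multiple_of`, then
// slices the result back to its original shape. The padding allows cuBLAS to
// select more efficient kernels, e.g. tensor-core kernels for f16 operands
// padded to multiples of 8.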
class CublasPadForGemms : public HloModulePass {
public:
CublasPadForGemms(const se::GpuComputeCapability gpu_compute_capability,
PrimitiveType datatype, int32_t pad_to_multiple_of)
: gpu_compute_capability_(gpu_compute_capability),
datatype_(datatype),
pad_to_multiple_of_(pad_to_multiple_of) {}
absl::string_view name() const override { return "cublas-pad-for-gemms"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::GpuComputeCapability gpu_compute_capability_;
PrimitiveType datatype_;
int32_t pad_to_multiple_of_;
};
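// Usage sketch (illustrative; mirrors how the tests below invoke the pass
// directly on a module):
//
//   CublasPadForGemms pass(se::CudaComputeCapability(7, 0),
//                          PrimitiveType::F16, /*pad_to_multiple_of=*/8);
//   TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module.get()));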
}
}
#endif
#include "xla/service/gpu/cublas_pad_for_gemms.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
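// Pads the last two dimensions of `dot`'s operands and result up to the next
// multiple of `pad_to_multiple_of`, clones the dot on the padded shapes, and
// slices the result back to the original shape. Returns false (no change) if
// the operand types do not match `datatype` or no padding is needed.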
static absl::StatusOr<bool> PadForGemm(HloDotInstruction* dot,
PrimitiveType datatype,
int pad_to_multiple_of) {
auto* lhs = dot->mutable_operand(0);
auto* rhs = dot->mutable_operand(1);
Shape lshape = lhs->shape();
Shape rshape = rhs->shape();
Shape result_shape = dot->shape();
if (lshape.element_type() != datatype || rshape.element_type() != datatype) {
return false;
}
auto pad_dim = [&](Shape& s, int dim) {
s.set_dimensions(dim,
RoundUpTo<int64_t>(s.dimensions(dim), pad_to_multiple_of));
};
auto pad_matrix_dims = [&pad_dim](Shape s) {
pad_dim(s, s.rank() - 2);
pad_dim(s, s.rank() - 1);
return s;
};
Shape new_lshape = pad_matrix_dims(lshape);
Shape new_rshape = pad_matrix_dims(rshape);
Shape new_result_shape = pad_matrix_dims(result_shape);
if (new_lshape == lshape && new_rshape == rshape) {
return false;
}
VLOG(3) << "old shape: " << lshape << " " << rshape << " " << result_shape;
VLOG(3) << "new shape: " << new_lshape << " " << new_rshape << " "
<< new_result_shape;
auto create_padding_config = [](Shape& shape, Shape& new_shape) {
PaddingConfig padding_config;
for (int i = 0; i < shape.rank(); ++i) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_high(new_shape.dimensions()[i] -
shape.dimensions()[i]);
dimension->set_edge_padding_low(0);
dimension->set_interior_padding(0);
}
return padding_config;
};
auto l_padding_config = create_padding_config(lshape, new_lshape);
auto r_padding_config = create_padding_config(rshape, new_rshape);
HloComputation* parent = dot->parent();
HloInstruction* zero_float = parent->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(datatype)));
zero_float->set_metadata(dot->metadata());
HloInstruction* lpad = parent->AddInstruction(
HloInstruction::CreatePad(new_lshape, lhs, zero_float, l_padding_config));
lpad->set_metadata(dot->metadata());
HloInstruction* rpad = parent->AddInstruction(
HloInstruction::CreatePad(new_rshape, rhs, zero_float, r_padding_config));
rpad->set_metadata(dot->metadata());
HloInstruction* new_dot = parent->AddInstruction(
dot->CloneWithNewOperands(new_result_shape, {lpad, rpad}));
std::vector<int64_t> start_indices(result_shape.rank(), 0);
std::vector<int64_t> strides(result_shape.rank(), 1);
HloInstruction* slice = parent->AddInstruction(
HloInstruction::CreateSlice(result_shape, new_dot, start_indices,
result_shape.dimensions(), strides));
slice->set_metadata(dot->metadata());
bool is_root = dot->user_count() == 0;
TF_CHECK_OK(parent->ReplaceInstruction(dot, slice));
if (is_root) {
parent->set_root_instruction(slice);
}
return true;
}
namespace {
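// A dot is canonical if, for both operands, all dimensions other than the
// trailing two are batch dimensions numbered 0, 1, ... in order. Only
// canonical dots are considered for padding.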
bool CheckCanonical(HloDotInstruction* dot) {
const auto& dimension_numbers = dot->dot_dimension_numbers();
if (dimension_numbers.lhs_batch_dimensions_size() + 2 !=
dot->operand(0)->shape().rank() ||
dimension_numbers.rhs_batch_dimensions_size() + 2 !=
dot->operand(1)->shape().rank()) {
VLOG(2)
<< dot->ToString()
<< " is not canonical: Expected all dimensions but 2 to be "
"batch_dimensions. Hence, this dot is not a candidate for padding.";
return false;
}
std::vector<int64_t> canonical_batch_dims(
dimension_numbers.lhs_batch_dimensions_size());
absl::c_iota(canonical_batch_dims, 0);
if (!absl::c_equal(dimension_numbers.lhs_batch_dimensions(),
canonical_batch_dims) ||
!absl::c_equal(dimension_numbers.rhs_batch_dimensions(),
canonical_batch_dims)) {
VLOG(2)
<< dot->ToString()
<< " is not canonical: Expected batch dimensions to be all "
"dimensions except for the last 2 ones. Hence, this dot is not a "
"candidate for padding.";
return false;
}
return true;
}
}
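// Collects the dots in `comp` that match `datatype`, are canonical, and will
// not be rewritten into Triton GEMM fusions (padding dots that Triton handles
// would be wasted work).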
static std::vector<HloDotInstruction*> GetRelevantDots(
const se::GpuComputeCapability& gpu_compute_capability,
HloComputation* comp, PrimitiveType datatype) {
std::vector<HloDotInstruction*> gemms;
for (HloInstruction* instr : comp->instructions()) {
if (IsMatrixMultiplication(*instr)) {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
if (instr->operand(0)->shape().element_type() == datatype &&
CheckCanonical(dot) &&
!(instr->GetModule()
->config()
.debug_options()
.xla_gpu_enable_triton_gemm() &&
legacy_triton::IsTritonSupportedInstruction(
*dot, gpu_compute_capability) &&
ShouldTritonHandleGEMM(*dot, gpu_compute_capability))) {
gemms.push_back(dot);
}
}
}
return gemms;
}
absl::StatusOr<bool> CublasPadForGemms::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloDotInstruction* dot :
GetRelevantDots(gpu_compute_capability_, comp, datatype_)) {
TF_ASSIGN_OR_RETURN(bool result,
PadForGemm(dot, datatype_, pad_to_multiple_of_));
changed |= result;
}
}
return changed;
}
}
} | #include "xla/service/gpu/cublas_pad_for_gemms.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
class CublasGemmPadForTensorCoresTest : public HloTestBase {
protected:
bool PadForF16Gemms(HloModule* module) {
return CublasPadForGemms(se::CudaComputeCapability(7, 0),
PrimitiveType::F16, 8)
.Run(module)
.value();
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_any(false);
return debug_options;
}
};
TEST_F(CublasGemmPadForTensorCoresTest, OneDotRootComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33708] parameter(1)
ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {1024, 33712}))
.WithShape(F16, {2048, 33712})
.WithContractingDims({1},
{0}))
.WithShape(F16, {2048, 33708})));
}
TEST_F(CublasGemmPadForTensorCoresTest, OneDotS8RootComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = s8[2047,1023] parameter(0)
%param2 = s8[1023,33707] parameter(1)
ROOT %dot.2309 = s32[2047,33707]{1,0} dot(s8[2047,1023]{1,0} %param1,
s8[1023,33707]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(
CublasPadForGemms(se::CudaComputeCapability(7, 0), PrimitiveType::S8, 4)
.Run(module.get())
.value());
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(S8, {2047, 1023}),
m::Constant().WithShape(S8, {}))
.WithShape(S8, {2048, 1024}),
m::Pad(m::Parameter().WithShape(S8, {1023, 33707}),
m::Constant().WithShape(S8, {}))
.WithShape(S8, {1024, 33708}))
.WithShape(S32, {2048, 33708})
.WithContractingDims({1},
{0}))
.WithShape(S32, {2047, 33707})));
}
TEST_F(CublasGemmPadForTensorCoresTest, TwoDotsComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048, 1024] parameter(0)
%param2 = f16[1024, 33708] parameter(1)
%param3 = f16[33708, 1] parameter(2)
%dot1 = f16[2048, 33708]{1,0} dot(f16[2048, 1024]{1,0} %param1,
f16[1024, 33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %dot2 = f16[2048, 1]{1,0} dot(f16[2048, 33708]{1,0} %dot1,
f16[33708, 1]{0,1} %param3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* dot2 = nullptr;
ASSERT_THAT(
root,
GmockMatch(
m::Slice(
m::Dot(
m::Pad(m::Slice(m::Dot(&dot2,
m::Pad().WithShape(F16, {2048, 1024}),
m::Pad().WithShape(F16, {1024, 33712}))
.WithContractingDims(
{1},
{0})
.WithShape(F16, {2048, 33712}))
.WithShape(F16, {2048, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 33712}),
m::Pad(m::Parameter().WithShape(F16, {33708, 1}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {33712, 8}))
.WithShape(F16, {2048, 8})
.WithContractingDims({1},
{0}))
.WithShape(F16, {2048, 1})));
EXPECT_THAT(
dot2,
GmockMatch(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {1024, 33712}))
.WithContractingDims({1},
{0})));
}
TEST_F(CublasGemmPadForTensorCoresTest, DotWithBatchDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[3, 5, 2048, 1024] parameter(0)
%param2 = f16[3, 5, 1024, 33708] parameter(1)
ROOT %dot.2309 = f16[3, 5, 2048, 33708]{3, 2, 1,0} dot(f16[3, 5, 2048, 1024]{3, 2, 1,0} %param1,
f16[3, 5, 1024, 33708]{2, 3, 0,1} %param2), lhs_batch_dims={0, 1}, rhs_batch_dims={0, 1}, lhs_contracting_dims={3}, rhs_contracting_dims={2}})")
.value();
EXPECT_TRUE(PadForF16Gemms(module.get()));
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(
m::Slice(
m::Dot(m::Pad(m::Parameter().WithShape(F16, {3, 5, 2048, 1024}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {3, 5, 2048, 1024}),
m::Pad(m::Parameter().WithShape(F16, {3, 5, 1024, 33708}),
m::Constant().WithShape(F16, {}))
.WithShape(F16, {3, 5, 1024, 33712}))
.WithShape(F16, {3, 5, 2048, 33712})
.WithContractingDims({3},
{2}))
.WithShape(F16, {3, 5, 2048, 33708})));
}
TEST_F(CublasGemmPadForTensorCoresTest, NoDotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %maximum = f32[] maximum(f32[] %x, f32[] %y)
})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, F32DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f32[2048,1024] parameter(0)
%param2 = f32[1024,33708] parameter(1)
ROOT %dot.2309 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} %param1,
f32[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, F64DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f64[2048,1024] parameter(0)
%param2 = f64[1024,33708] parameter(1)
ROOT %dot.2309 = f64[2048,33708]{1,0} dot(f64[2048,1024]{1,0} %param1,
f64[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, MultiplesOf8DotComputation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33712] parameter(1)
ROOT %dot.2309 = f16[2048,33712]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33712]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
TEST_F(CublasGemmPadForTensorCoresTest, CheckSavingMetadata) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[2048,1024] parameter(0)
%param2 = f16[1024,33708] parameter(1)
ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1,
f16[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0},
metadata={op_type="MatMul" op_name="transformer_v2/Transformer/decode/embedding_shared_weights_1/presoftmax_linear/MatMul"}
})")
.value();
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(PadForF16Gemms(module.get()));
auto metadata = module->entry_computation()->root_instruction()->metadata();
EXPECT_EQ("MatMul", metadata.op_type());
EXPECT_EQ(
"transformer_v2/Transformer/decode/embedding_shared_weights_1/"
"presoftmax_linear/MatMul",
metadata.op_name());
}
TEST_F(CublasGemmPadForTensorCoresTest, NotCanonicalizedDot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
%param1 = f16[3, 5, 2048, 1024] parameter(0)
%param2 = f16[3, 5, 1024, 33708] parameter(1)
ROOT %dot.2309 = f16[3,2048, 33708]{2, 1, 0} dot(f16[3, 5, 2048, 1024]{3, 2, 1, 0} %param1, f16[3, 5, 1024, 33708]{3, 2, 1, 0} %param2), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={3, 1}, rhs_contracting_dims={2, 1}})")
.value();
EXPECT_FALSE(PadForF16Gemms(module.get()));
}
}
}
} | 2,073 |
#ifndef XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_CYCLE_DECOMPOSER_H_
#define XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_CYCLE_DECOMPOSER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
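// Decomposes a collective-permute whose source-target pairs form a single
// forward or backward cycle into two cycle-free collective-permutes: one
// carrying only the back edge and one carrying all remaining edges. A select
// keyed on the partition id then routes each device to the permute that
// feeds it. Only permutes with a channel id and a result of at least
// `threshold_in_bytes` are decomposed.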
class CollectivePermuteCycleDecomposer : public HloModulePass {
public:
explicit CollectivePermuteCycleDecomposer(int64_t threshold_in_bytes)
: threshold_in_bytes_(threshold_in_bytes) {}
absl::string_view name() const override {
return "collective-permute-cycle-decomposer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
int64_t threshold_in_bytes_;
};
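// Usage sketch (illustrative; a zero threshold, as used in the tests below,
// decomposes every cyclic collective-permute regardless of size):
//
//   CollectivePermuteCycleDecomposer decomposer(/*threshold_in_bytes=*/0);
//   TF_ASSIGN_OR_RETURN(bool changed, decomposer.Run(module.get()));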
}
#endif
#include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
enum class CycleType { kUnknown, kForward, kBackward };
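// Returns the cycle type of `collective_permute` if it should be decomposed:
// it must have a channel id and a single operand, produce a non-tuple array
// result of at least `threshold_in_bytes`, and have more than one
// source-target pair forming a forward or a backward cycle. Returns kUnknown
// otherwise.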
CycleType ShouldDecomposeWithCycleType(
const HloCollectivePermuteInstruction& collective_permute,
int64_t threshold_in_bytes) {
if (!collective_permute.channel_id().has_value()) {
return CycleType::kUnknown;
}
if (collective_permute.operand_count() != 1) {
return CycleType::kUnknown;
}
const Shape& result_shape = collective_permute.shape();
if (result_shape.IsTuple()) {
return CycleType::kUnknown;
}
CHECK(result_shape.IsArray());
if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) {
return CycleType::kUnknown;
}
const SourceTargetPairs& pairs = collective_permute.source_target_pairs();
if (pairs.size() == 1) {
return CycleType::kUnknown;
}
return IsForwardCycle(pairs) ? CycleType::kForward
: IsBackwardCycle(pairs) ? CycleType::kBackward
: CycleType::kUnknown;
}
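// Splits the _xla_send_recv_validation frontend attribute of `cp`, when
// present and parseable, into the iteration bounds for the back-edge permute
// (`cp1_attr`) and for the remaining-edge permute (`cp2_attr`).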
absl::Status GetFrontendAttributes(HloCollectivePermuteInstruction* cp,
CycleType cycle_type,
xla::FrontendAttributes& cp1_attr,
xla::FrontendAttributes& cp2_attr) {
cp1_attr = cp->frontend_attributes();
cp2_attr = cp->frontend_attributes();
auto validation_it =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
if (validation_it == cp->frontend_attributes().map().end() ||
validation_it->second == "invalid") {
return absl::OkStatus();
}
auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
if (!statusor_bounds.ok()) {
return statusor_bounds.status();
}
const std::vector<ReplicaGroup>& bounds = statusor_bounds.value();
if (bounds.size() < 2) {
return Internal("Invalid number of replica groups");
}
int64_t num_pairs = bounds.size();
auto backedge_start = cycle_type == CycleType::kBackward
? bounds.begin()
: bounds.begin() + num_pairs - 1;
auto other_edges_start =
cycle_type == CycleType::kBackward ? bounds.begin() + 1 : bounds.begin();
std::vector<ReplicaGroup> cp1_bounds(backedge_start, backedge_start + 1);
std::vector<ReplicaGroup> cp2_bounds(other_edges_start,
other_edges_start + num_pairs - 1);
  auto bounds_to_string = [](const std::vector<ReplicaGroup>& groups) {
return "{" +
absl::StrJoin(groups, ",",
[](std::string* out, const ReplicaGroup& value) {
absl::StrAppend(out, "{", value.replica_ids(0), ",",
value.replica_ids(1), "}");
}) +
"}";
};
std::string cp1_validation_str = bounds_to_string(cp1_bounds);
std::string cp2_validation_str = bounds_to_string(cp2_bounds);
(*cp1_attr.mutable_map())[kSendRecvValidationAttr] = cp1_validation_str;
(*cp2_attr.mutable_map())[kSendRecvValidationAttr] = cp2_validation_str;
return absl::OkStatus();
}
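// Rewrites `cp` into two collective-permutes, one for the back edge and one
// for the remaining edges, and replaces its uses with
// select(partition-id == back-edge receiver, cp1, cp2) so that each device
// consumes the permute that actually targets it.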
absl::Status DecomposeCollectivePermuteCycle(
HloCollectivePermuteInstruction* cp, HloComputation* computation,
HloModule* module, int64_t next_channel_id, CycleType cycle_type) {
const SourceTargetPairs& pairs = cp->source_target_pairs();
int64_t num_pairs = pairs.size();
auto backedge_start = cycle_type == CycleType::kBackward
? pairs.begin()
: pairs.begin() + num_pairs - 1;
auto other_edges_start =
cycle_type == CycleType::kBackward ? pairs.begin() + 1 : pairs.begin();
SourceTargetPairs backedge(backedge_start, backedge_start + 1);
SourceTargetPairs other_edges(other_edges_start,
other_edges_start + num_pairs - 1);
const OpMetadata& metadata = cp->metadata();
xla::FrontendAttributes cp1_attr, cp2_attr;
TF_RETURN_IF_ERROR(GetFrontendAttributes(cp, cycle_type, cp1_attr, cp2_attr));
HloInstruction* cp1 =
computation->AddInstruction(HloInstruction::CreateCollectivePermute(
cp->shape(), cp->mutable_operand(0), backedge,
cp->channel_id().value()));
cp1->set_metadata(metadata);
cp1->set_frontend_attributes(cp1_attr);
int64_t cp1_receiver = backedge.back().second;
HloInstruction* cp2 =
computation->AddInstruction(HloInstruction::CreateCollectivePermute(
cp->shape(), cp->mutable_operand(0), other_edges, next_channel_id));
cp2->set_metadata(metadata);
cp2->set_frontend_attributes(cp2_attr);
HloInstruction* partition =
computation->AddInstruction(HloInstruction::CreatePartitionId());
HloInstruction* constant = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(U32, cp1_receiver)));
HloInstruction* compare0 = computation->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), partition,
constant, Comparison::Direction::kEq));
HloInstruction* compare =
computation->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(PRED, cp1->shape().dimensions()), compare0, {}));
HloInstruction* recv_data =
computation->AddInstruction(HloInstruction::CreateTernary(
cp1->shape(), HloOpcode::kSelect, compare, cp1, cp2));
TF_RETURN_IF_ERROR(cp->ReplaceAllUsesWith(recv_data));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(cp));
return absl::OkStatus();
}
}
absl::StatusOr<bool> CollectivePermuteCycleDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
  int64_t next_channel_id = 0;
for (auto comp : module->computations(execution_threads)) {
for (auto hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
auto collective_permute = Cast<HloCollectivePermuteInstruction>(hlo);
CycleType cycle_type = ShouldDecomposeWithCycleType(*collective_permute,
threshold_in_bytes_);
if (cycle_type != CycleType::kUnknown) {
        if (!changed) {
next_channel_id = hlo_query::NextChannelId(*module);
changed = true;
}
TF_RETURN_IF_ERROR(DecomposeCollectivePermuteCycle(
collective_permute, comp, module, next_channel_id++, cycle_type));
}
}
}
return changed;
}
} | #include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
using CollectivePermuteCycleDecomposerTest = HloTestBase;
using CollectivePermuteDecomposerTest = HloTestBase;
TEST_F(CollectivePermuteDecomposerTest, DefaultChannelNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] replica-id()
ROOT start = u32[] collective-permute(p),
source_target_pairs={{0,1},{1,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, TrivialNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, BelowThresholdNotTransformed) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(33);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycle) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[3,2] collective-permute(p), channel_id=1,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}},
frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto check_metadata = [](const HloInstruction* inst) {
EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
EXPECT_EQ(inst->metadata().source_line(), 35);
};
HloCollectivePermuteInstruction* cp1 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute"));
HloCollectivePermuteInstruction* cp2 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute.1"));
EXPECT_NE(cp1, nullptr);
EXPECT_NE(cp2, nullptr);
EXPECT_EQ(cp1->operand(0), cp2->operand(0));
EXPECT_GT(cp2->channel_id().value(), cp1->channel_id().value());
EXPECT_THAT(cp1->ToString(), HasSubstr("source_target_pairs={{3,0}}"));
EXPECT_THAT(cp1->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{3,10}}\""));
EXPECT_THAT(cp2->ToString(),
HasSubstr("source_target_pairs={{0,1},{1,2},{2,3}}"));
EXPECT_THAT(cp2->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{0,7},{1,8},{2,9}}\""));
check_metadata(cp1);
check_metadata(cp2);
}
TEST_F(CollectivePermuteCycleDecomposerTest, BackwardCycle) {
const absl::string_view kModuleStr = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=1,
source_target_pairs={{0,3},{1,0},{2,1},{3,2}},
frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"},
metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
CollectivePermuteCycleDecomposer decomposer(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto check_metadata = [](const HloInstruction* inst) {
EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
EXPECT_EQ(inst->metadata().source_line(), 35);
};
HloCollectivePermuteInstruction* cp1 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute"));
HloCollectivePermuteInstruction* cp2 =
DynCast<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), "collective-permute.1"));
EXPECT_NE(cp1, nullptr);
EXPECT_NE(cp2, nullptr);
EXPECT_EQ(cp1->operand(0), cp2->operand(0));
EXPECT_GT(cp2->channel_id().value(), cp1->channel_id().value());
EXPECT_THAT(cp1->ToString(), HasSubstr("source_target_pairs={{0,3}}"));
EXPECT_THAT(cp1->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{0,7}}\""));
EXPECT_THAT(cp2->ToString(),
HasSubstr("source_target_pairs={{1,0},{2,1},{3,2}}"));
EXPECT_THAT(cp2->ToString(),
HasSubstr("_xla_send_recv_validation=\"{{1,8},{2,9},{3,10}}\""));
check_metadata(cp1);
check_metadata(cp2);
}
}
} | 2,074 |
#ifndef XLA_SERVICE_GPU_CUDNN_FUSED_CONV_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_FUSED_CONV_REWRITER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
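// Rewrites convolution custom calls into cuDNN fused conv-bias-activation
// custom calls, folding surrounding elementwise ops (type conversions, result
// scaling, bias addition, side inputs, and activations such as ReLU) into the
// call where the compute capability, cuDNN version, and toolkit version
// allow it.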
class CudnnFusedConvRewriter : public HloModulePass {
public:
CudnnFusedConvRewriter(se::CudaComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version)
: compute_capability_(cc),
dnn_version_(dnn_version),
toolkit_version_(toolkit_version) {}
CudnnFusedConvRewriter(se::RocmComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version)
: compute_capability_(cc),
dnn_version_(dnn_version),
toolkit_version_(toolkit_version) {}
absl::string_view name() const override {
return "cudnn-fused-convolution-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::GpuComputeCapability compute_capability_;
const se::dnn::VersionInfo dnn_version_;
const int32_t toolkit_version_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
bool IsConvCustomCall(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall &&
(instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget);
}
bool IsConvDepthwise(const HloInstruction* instr) {
int64_t feature_group_count = instr->feature_group_count();
if (feature_group_count == 1) {
return false;
}
const HloInstruction* input = instr->operand(0);
int64_t input_feature_dimension =
instr->convolution_dimension_numbers().input_feature_dimension();
int64_t input_feature_count =
input->shape().dimensions(input_feature_dimension);
return input_feature_count == feature_group_count;
}
bool IsNonDepthwiseConvCustomCall(const HloInstruction* instr) {
return IsConvCustomCall(instr) && !IsConvDepthwise(instr);
}
bool IsROCm(se::GpuComputeCapability cc) {
return std::holds_alternative<se::RocmComputeCapability>(cc);
}
bool ShouldUseCudnnRuntimeFusion(const DebugOptions& debug_opts,
se::GpuComputeCapability cc) {
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&cc);
  if (cuda_cc != nullptr) {
    return debug_opts.xla_gpu_use_runtime_fusion() && cuda_cc->IsAtLeast(7, 5);
  }
  return true;
}
bool IsSuitableForCudnnRuntimeFusion(HloInstruction* conv) {
if (conv->operands().size() > 3) {
return false;
}
if (conv->operand(0)->shape().element_type() != F16) {
return false;
}
const Shape& shape = conv->operand(1)->shape();
int64_t num_input_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_input_feature_dimension());
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_output_feature_dimension());
if (num_input_features % 2 != 0 || num_output_features % 2 != 0) {
return false;
}
return true;
}
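// Returns true if converting `instr` to `dst_ty` is value-preserving. Looks
// through converts (checking that the cast preserves values), constants (by
// round-tripping the literal through `dst_ty`), and shape-only ops such as
// broadcast, reshape, and transpose.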
bool IsLosslesslyConvertibleTo(const HloInstruction* instr,
PrimitiveType dst_ty) {
if (instr->shape().element_type() == dst_ty) {
return true;
}
if (Match(instr, m::Convert(m::Op().WithElementType(dst_ty)))) {
return primitive_util::CastPreservesValues(dst_ty,
instr->shape().element_type());
}
if (instr->opcode() == HloOpcode::kConstant) {
if (!instr->shape().IsArray()) {
return false;
}
PrimitiveType orig_ty = instr->shape().element_type();
absl::StatusOr<Literal> converted1 = instr->literal().Convert(dst_ty);
if (!converted1.ok()) {
return false;
}
absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
if (!converted2.ok()) {
return false;
}
return instr->literal() == *converted2;
}
if (instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose) {
return IsLosslesslyConvertibleTo(instr->operand(0), dst_ty);
}
return false;
}
bool IsLosslesslyConvertibleToS8(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, S8);
}
bool IsLosslesslyConvertibleToF16(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, F16);
}
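// Turns a plain conv custom call into a conv-bias-activation custom call by
// appending an all-zero bias operand; returns the conv unchanged if it is
// already a bias-activation call.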
absl::StatusOr<HloInstruction*> EnsureIsConvBiasActivation(
HloInstruction* conv) {
CHECK_EQ(conv->opcode(), HloOpcode::kCustomCall);
if (conv->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget) {
return conv;
}
if (conv->custom_call_target() == kCudnnConvForwardCallTarget) {
HloComputation* comp = conv->parent();
const Shape& shape = conv->shape().tuple_shapes(0);
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().output_feature_dimension());
PrimitiveType bias_ty;
if (primitive_util::IsIntegralType(shape.element_type())) {
bias_ty = F32;
} else {
bias_ty = shape.element_type();
}
auto bias = BroadcastZeros(comp, bias_ty, {num_output_features});
absl::InlinedVector<HloInstruction*, 3> new_operands(
conv->operands().begin(), conv->operands().end());
new_operands.push_back(bias);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv));
new_conv->set_custom_call_target(kCudnnConvBiasActivationForwardCallTarget);
comp->parent()->SetAndUniquifyInstrName(new_conv,
"cudnn-conv-bias-activation");
return new_conv;
}
return FailedPrecondition("Unsupported conv: %s", conv->ToString());
}
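// Folds a convert from `conv_type` to `cvt_type` that immediately follows a
// conv custom call into the conv itself, by rewriting the conv to produce
// `cvt_type` directly.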
absl::StatusOr<bool> FuseConvertTypeIntoConv(HloComputation* comp,
PrimitiveType conv_type,
PrimitiveType cvt_type) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
auto tuple_elem =
m::GetTupleElement(m::Op(&conv).WithPredicate(IsConvCustomCall), 0)
.WithElementType(conv_type);
auto pattern =
m::Convert(tuple_elem.WithOneUser()).WithElementType(cvt_type);
if (!Match(instr, pattern)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvertTypeIntoConv: ", conv->ToString());
})) {
continue;
}
Shape new_shape = conv->shape();
new_shape.mutable_tuple_shapes(0)->set_element_type(cvt_type);
HloInstruction* new_conv =
comp->AddInstruction(conv->CloneWithNewShape(new_shape));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(HloInstruction * new_gte,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_gte));
changed = true;
}
return changed;
}
struct ConvConvertTypes {
PrimitiveType convolution_type;
PrimitiveType conversion_type;
};
absl::StatusOr<bool> FuseRemoveConvertInConv(HloComputation* comp) {
bool changed = false;
std::array<ConvConvertTypes, 3> types{{
{S32, F32},
{S8, F32},
{F32, S8},
}};
for (auto [conv_type, cvt_type] : types) {
TF_ASSIGN_OR_RETURN(bool curr_change,
FuseConvertTypeIntoConv(comp, conv_type, cvt_type));
changed |= curr_change;
}
return changed;
}
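// Folds multiplication of a conv result by a broadcast scalar `alpha` into
// the conv's conv_result_scale backend-config field, provided alpha is
// losslessly convertible to the scale's type and no scale is already set.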
absl::StatusOr<bool> FuseConvAlpha(HloComputation* comp) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
HloInstruction* gte = nullptr;
HloInstruction* alpha = nullptr;
auto pattern = m::MultiplyAnyOrder(
m::GetTupleElement(
>e, m::Op(&conv).WithPredicate(IsNonDepthwiseConvCustomCall), 0)
.WithOneUse(),
m::Broadcast(m::ConstantEffectiveScalar(&alpha)));
if (!Match(instr, pattern)) {
continue;
}
PrimitiveType alpha_ty = gte->shape().element_type() == F64 ? F64 : F32;
if (!IsLosslesslyConvertibleTo(alpha, alpha_ty)) {
continue;
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.conv_result_scale() != 1) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvAlpha: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64));
config.set_conv_result_scale(alpha_f64.GetFirstElement<double>());
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(conv->parent()->ReplaceInstruction(instr, gte));
changed = true;
}
return changed;
}
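// Serializes a linear graph of ops, each with at most one in-graph operand,
// as a string of the form "uid:[type]name(operand_uid);...". Used below to
// describe the op graph captured around FP8 convolutions.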
class GraphString {
public:
GraphString() = default;
bool AppendOp(std::string op_name, HloInstruction* op,
std::vector<HloInstruction*> operands = {}) {
std::optional<int64_t> operand_uid;
int num_operands_in_graph = 0;
for (HloInstruction* operand : operands) {
if (OpInGraph(operand->unique_id())) {
num_operands_in_graph++;
if (num_operands_in_graph > 1) {
return false;
}
operand_uid = operand->unique_id();
}
}
graph_.emplace_back(OpDescriptor(
{op->unique_id(), op->shape().element_type(), op_name, operand_uid}));
return true;
}
void ChangeDataType(PrimitiveType type) {
DCHECK(!graph_.empty());
graph_.back().output_type = type;
}
std::string Graph() const {
std::string graph;
    for (const OpDescriptor& op : graph_) {
graph.append(std::to_string(op.uid));
graph.append(":[" +
primitive_util::LowercasePrimitiveTypeName(op.output_type) +
"]");
graph.append(op.name);
graph.append("(");
if (op.operand.has_value()) {
graph.append(std::to_string(*op.operand));
}
graph.append(");");
}
return graph;
}
bool OpInGraph(int64_t uid, std::string op_name = "") const {
auto op_filter = [&](OpDescriptor op) -> bool {
if (op_name.empty()) {
return op.uid == uid;
} else {
return op.uid == uid && op.name == op_name;
}
};
return std::find_if(graph_.begin(), graph_.end(), op_filter) !=
graph_.end();
}
private:
struct OpDescriptor {
int64_t uid;
PrimitiveType output_type;
std::string name;
std::optional<int64_t> operand;
};
std::vector<OpDescriptor> graph_;
};
bool IsF8Type(const HloInstruction* instr) {
return primitive_util::IsF8Type(instr->shape().element_type());
}
bool IsScalar(const HloInstruction* instr) {
return ShapeUtil::IsScalar(instr->shape());
}
std::optional<PrimitiveType> IsSaturatingCastToF8(HloInstruction* instr) {
HloInstruction *op, *clamp_lower, *clamp_upper;
if (Match(instr,
m::Convert(
&op,
m::Clamp(m::Broadcast(m::ConstantScalar(&clamp_lower)), m::Op(),
m::Broadcast(m::ConstantScalar(&clamp_upper))))) &&
((op->shape().element_type() == F8E4M3FN &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::max()))) ||
(op->shape().element_type() == F8E5M2 &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::max()))))) {
return op->shape().element_type();
}
return std::nullopt;
}
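// Returns true if `op` is a scalar reduce over a maximum computation with a
// non-positive constant init value, i.e. it computes the maximum over all
// elements of its input.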
bool AppliesMaxReduce(HloInstruction* op) {
HloComputation* reduce_comp = op->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return ShapeUtil::IsScalar(op->shape()) &&
ShapeUtil::IsScalar(op->operand(1)->shape()) &&
op->operand(1)->IsConstant() &&
op->operand(1)->literal().GetAsDouble({}) <= 0. &&
reduce_comp_root->opcode() == HloOpcode::kMaximum &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
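// Recursively follows the users of `instr`, appending the fusible linear ops
// (add, scale, inverse scale, relu) and amax side outputs to `graph_string`.
// If an instruction has more than one linear or nonlinear user, or users that
// cannot be fused, the state captured while visiting it is rolled back.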
void CaptureConvGraphRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& operands,
std::vector<HloInstruction*>& aux_outputs,
GraphString& graph_string,
absl::flat_hash_set<int>& visited_instrs,
HloInstruction*& final_instr) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return;
}
final_instr = instr;
GraphString init_graph_string = graph_string;
std::vector<HloInstruction*> init_operands = operands,
init_aux_outputs = aux_outputs;
int num_linear_users = 0, num_nonlinear_users = 0;
for (HloInstruction* user : instr->users()) {
HloInstruction *op, *operand0, *operand1;
if (Match(user, m::AddAnyOrder(&op, m::Op(&operand0), m::Op(&operand1)))) {
if (graph_string.AppendOp("add", op, {operand0, operand1})) {
operands.push_back(operand0 == instr ? operand1 : operand0);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MultiplyAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("scale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Divide(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("invscale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MaximumAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::ConstantScalar(0))))) {
if (graph_string.AppendOp("relu", op, {operand0})) {
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Reduce(&op, m::Op(&operand0), m::Op())) &&
graph_string.OpInGraph(operand0->unique_id(), "relu") &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
if (!user->users().empty()) {
HloInstruction* users_user = user->users()[0];
std::optional<PrimitiveType> f8_type = IsSaturatingCastToF8(users_user);
if (f8_type.has_value()) {
graph_string.ChangeDataType(f8_type.value());
num_linear_users++;
CaptureConvGraphRecursive(users_user, operands, aux_outputs,
graph_string, visited_instrs, final_instr);
continue;
}
if (Match(users_user,
m::Reduce(&op, m::Abs(m::Op(&operand0)), m::Op())) &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
}
}
if (num_linear_users > 1 || num_nonlinear_users > 1 ||
num_linear_users + num_nonlinear_users < instr->user_count()) {
graph_string = init_graph_string;
operands = init_operands;
aux_outputs = init_aux_outputs;
final_instr = instr;
}
}
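// Materializes the input and filter scaling ops after the convolution, then
// captures the op graph that can be fused into it. Returns the extra
// operands, the auxiliary (amax) outputs, the serialized graph, and the last
// instruction absorbed into the graph.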
absl::StatusOr<
std::tuple<std::vector<HloInstruction*>, std::vector<HloInstruction*>,
GraphString, HloInstruction*>>
CaptureConvGraph(HloInstruction* instr, HloInstruction* convolution,
HloInstruction* wide_input, HloInstruction* wide_filter,
HloInstruction* input_scale, HloInstruction* filter_scale,
bool x_mult_scale, bool w_mult_scale) {
GraphString graph_string;
graph_string.AppendOp("conv", instr);
HloInstruction *input_scaled_conv, *filter_scaled_conv;
if (input_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(0, wide_input));
HloInstruction* bcast_input_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), input_scale, {}));
input_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
x_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide, instr,
bcast_input_scale));
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(input_scaled_conv));
}
if (filter_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(1, wide_filter));
HloInstruction* bcast_filter_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), filter_scale, {}));
filter_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
w_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide,
input_scale ? input_scaled_conv : instr, bcast_filter_scale));
TF_RETURN_IF_ERROR((input_scale ? input_scaled_conv : instr)
->ReplaceAllUsesWith(filter_scaled_conv));
}
std::vector<HloInstruction*> operands, aux_outputs;
absl::flat_hash_set<int> visited_instrs;
HloInstruction* final_instr;
CaptureConvGraphRecursive(instr, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
return std::make_tuple(operands, aux_outputs, graph_string, final_instr);
}
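// Rewrites FP8 convolutions together with their surrounding elementwise ops
// into cuDNN graph-API custom calls. Requires cuDNN 8.9+, CUDA 12+, and
// Hopper or newer GPUs; otherwise returns without changes.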
absl::StatusOr<bool> F8GraphConv(HloComputation* comp,
se::CudaComputeCapability cc,
se::dnn::VersionInfo dnn_version,
int32_t toolkit_version) {
bool changed = false;
if (dnn_version < se::dnn::VersionInfo(8, 9, 0)) {
return false;
}
if (toolkit_version < 12000) {
return false;
}
if (!cc.IsAtLeast(se::CudaComputeCapability::HOPPER)) {
return false;
}
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction *convolution, *gte, *input, *filter,
*input_scale = nullptr, *filter_scale = nullptr,
*input_scale_op = nullptr, *filter_scale_op = nullptr,
*wide_input = nullptr, *wide_filter = nullptr;
auto conv_operand_maybe_scaled = [](HloInstruction** operand,
HloInstruction** wide_operand,
HloInstruction** scale_op,
HloInstruction** scale) {
return m::AnyOf<HloInstruction>(
m::Op(operand).WithPredicate(IsF8Type),
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Divide(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))),
m::MultiplyAnyOrder(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))));
};
auto pattern = m::GetTupleElement(
        &gte, | #include "xla/service/gpu/cudnn_fused_conv_rewriter.h"
#include <array>
#include <memory>
#include <string>
#include <string_view>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/convert_mover.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
using ::testing::HasSubstr;
using ::testing::Not;
const auto* kf16f32f64 = new std::vector<std::string>({"f16", "f32", "f64"});
const auto* kf16f32 = new std::vector<std::string>({"f16", "f32"});
class CudnnFusedConvRewriterHloTest : public HloTestBase {
public:
bool IsCuda() {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
int32_t GetToolkitVersion() const {
#if GOOGLE_CUDA
return CUDA_VERSION;
#elif TENSORFLOW_USE_ROCM
return TF_ROCM_VERSION;
#endif
return 0;
}
CudnnFusedConvRewriterHloTest()
: HloTestBase(false,
false,
{}) {}
};
class CudnnFusedConvRewriterTest : public GpuCodegenTest {
public:
bool IsCuda() {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
int32_t GetToolkitVersion() const {
#if GOOGLE_CUDA
return CUDA_VERSION;
#elif TENSORFLOW_USE_ROCM
return TF_ROCM_VERSION;
#endif
return 0;
}
protected:
std::string GetOptimizedHlo(absl::string_view hlo_string) {
HloModuleConfig config = GetModuleConfigForTest();
DebugOptions debug_opts = config.debug_options();
debug_opts.add_xla_disable_hlo_passes("cudnn_vectorize_convolutions");
debug_opts.set_xla_gpu_use_runtime_fusion(true);
config.set_debug_options(debug_opts);
auto result = backend().compiler()->RunHloPasses(
ParseAndReturnVerifiedModule(hlo_string, config).value(),
backend().default_stream_executor(), backend().memory_allocator());
if (!result.status().ok()) {
TF_EXPECT_OK(result.status())
<< "HLO compilation failed: " << result.status();
return "";
}
HloPrintOptions print_opts;
print_opts.set_print_operand_shape(false);
return (*result)->ToString(print_opts);
}
void TestMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : *(IsCuda() ? kf16f32f64 : kf16f32)) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvForwardCallTarget)))
<< optimized_hlo_string;
EXPECT_THAT(optimized_hlo_string,
HasSubstr(kCudnnConvBiasActivationForwardCallTarget));
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_with_new_type));
DebugOptions debug_opts = module->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
module->mutable_config().set_debug_options(debug_opts);
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01}))
<< optimized_hlo_string;
}
}
void TestClamp(absl::string_view pre_hlo_string,
absl::string_view post_hlo_string) {
std::string alpha_conv_scalar, alpha_side_input_scalar;
std::string elementwise_type;
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.01}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, post_hlo_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
}
void TestNotMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : *(IsCuda() ? kf16f32f64 : kf16f32)) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
SCOPED_TRACE(optimized_hlo_string);
EXPECT_THAT(optimized_hlo_string, HasSubstr(kCudnnConvForwardCallTarget));
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvBiasActivationForwardCallTarget)));
}
}
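  // On Hopper, compiles the HLO end to end and FileChecks both the custom
  // call and the serialized graph; on older GPUs, the rewrite passes alone
  // are run against a simulated Hopper target, with the result shape and
  // dim_labels stripped from the expected custom-call pattern.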
void TestF8(std::string pre_hlo_string, std::string custom_call_string,
std::string serialized_graph_string) {
if (!IsCuda()) return;
if (GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.15, 0.15}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, custom_call_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
filecheck_result =
RunFileCheck(optimized_hlo_string, serialized_graph_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
} else {
std::string::size_type p0 = custom_call_string.find(':');
std::string::size_type p1 = custom_call_string.find("custom-call");
custom_call_string.erase(p0 + 1, p1 - p0 - 2);
p0 = custom_call_string.find(", dim_labels");
custom_call_string.erase(p0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(pre_hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHloPass(GpuConvRewriter(GetCudaComputeCapability()),
module.get()));
EXPECT_TRUE(changed);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
custom_call_string);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
serialized_graph_string);
}
}
void TestF8Parameterized(std::string template_pre_hlo_string,
std::string template_custom_call_string,
std::string template_serialized_graph_string) {
std::array<absl::string_view, 2> types = {"f8e4m3fn", "f8e5m2"};
std::array<absl::string_view, 2> clamp_lower = {"-448.", "-57344."};
std::array<absl::string_view, 2> clamp_upper = {"448.", "57344."};
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
for (int i = 0; i < 2; ++i) {
replacements["<<InputType>>"] = types[i];
for (int j = 0; j < 2; ++j) {
replacements["<<FilterType>>"] = types[j];
for (int k = 0; k < 2; ++k) {
replacements["<<OutputType>>"] = types[k];
replacements["<<ClampLower>>"] = clamp_lower[k];
replacements["<<ClampUpper>>"] = clamp_upper[k];
TestF8(absl::StrReplaceAll(template_pre_hlo_string, replacements),
absl::StrReplaceAll(template_custom_call_string, replacements),
absl::StrReplaceAll(template_serialized_graph_string,
replacements));
}
}
}
}
};
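// Skips tests whose fusions are unsupported on the current platform: FP8
// convolutions need CUDA 12 and cuDNN 8.9, and ROCm supports none of the
// gated fusions.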
#if GOOGLE_CUDA
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8900)
#define MAYBE_SKIP_TEST(CAUSE) \
do { \
if (absl::string_view(CAUSE) == "F8") \
GTEST_SKIP() << "FP8 convolutions require CUDA 12 and cuDNN 8.9."; \
} while (0)
#else
#define MAYBE_SKIP_TEST(CAUSE)
#endif
#else
#define MAYBE_SKIP_TEST(CAUSE) \
do { \
GTEST_SKIP() << "ROCm does not support " CAUSE " fusion"; \
} while (0)
#endif
TEST_F(CudnnFusedConvRewriterTest, TestConvOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseReluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
ROOT relu = TYPE[1,17,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, Test3D) {
std::string body = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,5,7,64] broadcast(zero), dimensions={}
input = TYPE[1,3,5,7,64] parameter(0)
filter = TYPE[3,3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,5,7,64] convolution(input, filter), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f, feature_group_count=1
broadcasted_bias = TYPE[1,3,5,7,64] broadcast(bias), dimensions={4}
add1 = TYPE[1,3,5,7,64] add(conv, broadcasted_bias)
)";
std::string relu = R"(
ROOT relu = TYPE[1,3,5,7,64] maximum(zeros, add1)
})";
std::string elu = R"(
cmp = pred[1,3,5,7,64] compare(add1, zeros), direction=GT
expm1 = TYPE[1,3,5,7,64] exponential-minus-one(add1)
ROOT elu = TYPE[1,3,5,7,64] select(cmp, add1, expm1)
})";
TestMatchWithAllTypes(body + relu);
if (!IsCuda()) TestMatchWithAllTypes(body + elu);
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasMultiCall) {
std::string code = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,<<<format>>>,64] broadcast(zero), dimensions={}
input = TYPE[1,<<<format>>>,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,<<<format>>>,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,<<<format>>>,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,<<<format>>>,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,<<<format>>>,64] maximum(zeros, add1)
})";
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "5,5";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasNoRelu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
ROOT add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseBiasWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestElu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseEluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
six = TYPE[] constant(6)
sixes = TYPE[1,3,3,64] broadcast(six), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu6 = TYPE[1,3,3,64] clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6OddChannels) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zeros = TYPE[1,384,1024,32] broadcast(TYPE[] constant(0)), dimensions={}
sixes = TYPE[1,384,1024,32] broadcast(TYPE[] constant(6)), dimensions={}
input = TYPE[1,769,2049,3] parameter(0)
filter = TYPE[32,3,3,3] parameter(1)
bias = TYPE[32] parameter(2)
conv = TYPE[1,384,1024,32] convolution(input, filter), window={size=3x3 stride=2x2}, dim_labels=b01f_o01i->b01f
broadcasted_bias = TYPE[1,384,1024,32] broadcast(bias), dimensions={3}
sum = add(conv, broadcasted_bias)
ROOT relu6 = clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestLeakyRelu) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP()
<< "Conv-Bias-LeakyRelu fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha = TYPE[] constant(0.2)
alphas = TYPE[1,3,3,64] broadcast(alpha), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
mul = TYPE[1,3,3,64] multiply(sum, alphas)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, mul)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestSideInputOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseSideInputWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasAndSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConv) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = TYPE[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,32,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseScaledDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
alpha_conv = TYPE[1,17,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,17,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,17,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestNoCrashOnInf) {
EXPECT_TRUE(RunAndCompare(R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(inf)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = f32[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = f32[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = f32[1,32,9,9] maximum(zeros, scaled_conv)
})",
ErrorSpec{0.01}));
}
TEST_F(CudnnFusedConvRewriterTest, TestConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseDepthwiseConvWithScaledSideInput) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(scaled_conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInputWithBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestMatchMaxZeroOnly) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
point_one = TYPE[] constant(0.1)
point_ones = TYPE[1,32,9,9] broadcast(point_one), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(point_ones, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, PreservesMetadata) {
const char* kHloString = R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1, metadata={op_type="foo" op_name="bar"}
ROOT relu = f32[1,32,9,9] maximum(zeros, conv)
})";
const std::string optimized_hlo_string =
backend()
.compiler()
->RunHloPasses(
ParseAndReturnVer | 2,075 |
#ifndef XLA_SERVICE_GPU_KERNEL_REUSE_CACHE_H_
#define XLA_SERVICE_GPU_KERNEL_REUSE_CACHE_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/stream_executor/launch_dim.h"
namespace xla {
namespace gpu {
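// Caches generated kernels keyed by a fingerprint of the fused computation,
// its kernel arguments, and an optional discriminator, so identical fusions
// reuse one compiled kernel.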
class KernelReuseCache {
public:
struct Entry {
std::string kernel_name;
LaunchDimensions launch_dimensions;
std::optional<se::ClusterDim> cluster_dim;
int64_t shmem_bytes = 0;
std::string binary;
};
struct NamedBinary {
std::string name;
std::vector<uint8_t> binary;
};
absl::Status Load(const CompilationCacheProto& proto);
CompilationCacheProto Export() const;
bool IsEmpty() const { return cache_.empty(); }
void Clear() {
cache_.clear();
hits_.clear();
}
  std::pair<absl::StatusOr<const Entry*>, bool> GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<Entry>()>& generator);
  std::pair<absl::StatusOr<const Entry*>, bool> GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<Entry>()>& generator);
private:
  absl::flat_hash_map<std::string, Entry> cache_;
absl::flat_hash_set<std::string> hits_;
};
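// Merges the given binaries (looked up by name in `current_cache`) into the
// serialized cache proto stored at `path`, appending to the existing file
// contents when `do_append` is set.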
absl::Status UpdateDiskKernelCache(
absl::string_view path, bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache);
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator = "");
}
}
#endif
#include "xla/service/gpu/kernel_reuse_cache.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
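// Fingerprints the kernel arguments: an argument that shares a buffer slice
// with an earlier one is encoded as a reference to it; otherwise its
// alignment plus aliased ("a") and written ("w") flags are recorded.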
std::string GetArgumentFingerprint(
absl::Span<const KernelArgument> kernel_arguments) {
return absl::StrJoin(
kernel_arguments, ",", [](std::string* s, const KernelArgument& arg) {
if (arg.first_with_same_slice().has_value()) {
absl::StrAppend(s, "=", arg.first_with_same_slice().value());
return;
}
absl::StrAppend(s, arg.alignment());
if (arg.aliased()) {
absl::StrAppend(s, "a");
}
if (arg.written()) {
absl::StrAppend(s, "w");
}
});
}
}
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator) {
auto print_options = HloPrintOptions::Fingerprint()
.set_print_only_essential_constants(false)
.set_print_operand_shape(false);
return absl::StrCat(discriminator, "(",
GetArgumentFingerprint(kernel_arguments), ")",
fused_computation->ToString(print_options));
}
absl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {
for (const auto& [name, entry] : proto.entries()) {
std::optional<se::ClusterDim> cluster_dim;
if (entry.has_cluster_dim()) {
cluster_dim =
se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),
entry.cluster_dim().z()};
}
TF_RET_CHECK(
cache_
.insert(
{entry.fingerprint(),
Entry{name,
LaunchDimensions{
entry.launch_dimensions().num_blocks(),
entry.launch_dimensions().num_threads_per_block()},
cluster_dim, entry.shmem_bytes(), entry.binary()}})
.second);
}
return absl::OkStatus();
}
CompilationCacheProto KernelReuseCache::Export() const {
CompilationCacheProto proto;
for (const auto& [fingerprint, cache_entry] : cache_) {
if (!hits_.contains(fingerprint)) {
VLOG(5) << "Not exporting unused " << cache_entry.kernel_name;
continue;
}
auto [it, inserted] = proto.mutable_entries()->emplace(
cache_entry.kernel_name, CompilationCacheEntryProto{});
CHECK(inserted) << cache_entry.kernel_name;
CompilationCacheEntryProto& proto_entry = it->second;
proto_entry.set_fingerprint(fingerprint);
LaunchDimensionsProto launch_dimensions_proto;
launch_dimensions_proto.set_num_blocks(
cache_entry.launch_dimensions.num_blocks());
launch_dimensions_proto.set_num_threads_per_block(
cache_entry.launch_dimensions.num_threads_per_block());
*proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;
if (cache_entry.cluster_dim.has_value()) {
ClusterDimProto cluster_dim_proto;
cluster_dim_proto.set_x(cache_entry.cluster_dim->x);
cluster_dim_proto.set_y(cache_entry.cluster_dim->y);
cluster_dim_proto.set_z(cache_entry.cluster_dim->z);
*proto_entry.mutable_cluster_dim() = cluster_dim_proto;
}
proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);
proto_entry.set_binary(cache_entry.binary);
}
return proto;
}
absl::Status UpdateDiskKernelCache(
absl::string_view path, const bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache) {
CompilationCacheProto disk_cache;
if (do_append) {
std::string serialized;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
std::string(path), &serialized));
if (!disk_cache.ParseFromString(std::string(serialized))) {
return Internal("Failed to parse serialized CompilationCacheProto.");
}
}
auto entries = disk_cache.mutable_entries();
int stored_kernel_count = 0;
for (const auto& [name, binary] : binaries_to_cache) {
auto it_current = current_cache.entries().find(name);
TF_RET_CHECK(it_current != current_cache.entries().end());
auto [it_disk, inserted] = entries->insert({name, it_current->second});
TF_RET_CHECK(inserted);
TF_RET_CHECK(!binary.empty());
it_disk->second.set_binary(reinterpret_cast<const char*>(binary.data()),
binary.size());
VLOG(5) << "Cached kernel: " << name << ": " << binary.size();
++stored_kernel_count;
}
if (stored_kernel_count > 0) {
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(path),
disk_cache.SerializeAsString()));
VLOG(2) << "Stored " << stored_kernel_count << " / "
<< binaries_to_cache.size() << " kernels in the cache file.";
}
return absl::OkStatus();
}
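// Looks up the entry for the computation's fingerprint, running `generator`
// to create and cache it on a miss. The second element of the returned pair
// is true when the entry was already cached.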
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
std::string fingerprint = GetComputationFingerprint(
fused_computation, kernel_arguments, discriminator);
VLOG(4) << "Fingerprint: ";
XLA_VLOG_LINES(4, fingerprint);
return GetWithStatus(std::move(fingerprint), generator);
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
hits_.insert(fingerprint);
auto it = cache_.find(fingerprint);
if (it != cache_.end()) {
return {&it->second, true};
}
absl::StatusOr<Entry> entry = generator();
if (entry.ok()) {
it =
cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;
return {&it->second, false};
}
return {entry.status(), false};
}
}
} | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <gtest/gtest.h>
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace gpu {
namespace {
using KernelReuseTest = ::testing::Test;
TEST_F(KernelReuseTest, ExportAndLoadWork) {
KernelReuseCache cache;
EXPECT_TRUE(cache.IsEmpty());
auto [result, was_cached] = cache.GetWithStatus(
"fingerprint", []() { return KernelReuseCache::Entry{}; });
TF_EXPECT_OK(result);
EXPECT_NE(result.value(), nullptr);
EXPECT_FALSE(was_cached);
EXPECT_FALSE(cache.IsEmpty());
const CompilationCacheProto proto = cache.Export();
cache.Clear();
EXPECT_TRUE(cache.IsEmpty());
TF_EXPECT_OK(cache.Load(proto));
EXPECT_FALSE(cache.IsEmpty());
}
TEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {
std::string cache_file_path;
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k1");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, false,
proto,
{{.name = "k1", .binary = {5, 6}}}));
}
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k2");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, true,
proto,
{{.name = "k2", .binary = {7, 8}}}));
}
std::string serialized;
TF_EXPECT_OK(
tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
EXPECT_EQ(proto.entries_size(), 2);
}
}
}
} | 2,076 |
#ifndef XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#define XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
class GpuConvAlgorithmPicker : public HloModulePass {
public:
explicit GpuConvAlgorithmPicker(AutotuneConfig config) : config_(config) {}
absl::string_view name() const override {
return "gpu-conv-algorithm-picker";
}
static bool IsEnabled(const HloModule* module) {
return module->config().debug_options().xla_gpu_autotune_level() != 0;
}
static bool IsCandidate(const HloInstruction* instr) {
return IsCustomCallToDnnConvolution(*instr);
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
absl::StatusOr<bool> RunOnInstruction(HloInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithm(
const HloCustomCallInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
struct ReferenceResult {
stream_executor::dnn::AlgorithmDesc algorithm;
std::vector<stream_executor::DeviceMemoryBase> buffers;
};
struct AutotuneRuntimeArguments {
const HloModuleConfig hlo_module_config;
RedzoneBuffers rz_buffers;
const GpuConvConfig gpu_conv_config;
std::optional<std::string> canonical_hlo;
static absl::StatusOr<AutotuneRuntimeArguments> FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options);
};
absl::StatusOr<AutotuneResult> AutotuneOneConvRunner(
GenericConvRunner* runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const stream_executor::dnn::AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheCuda(
const HloCustomCallInstruction* instr);
#endif
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheRocm(
const HloCustomCallInstruction* instr);
AutotuneConfig config_;
};
}
}
#endif
#include "xla/service/gpu/conv_algorithm_picker.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_autotuning.pb.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#if CUDNN_VERSION >= 90000
#include "third_party/gpus/cudnn/cudnn_ops.h"
#else
#include "third_party/gpus/cudnn/cudnn_ops_infer.h"
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
namespace {
using se::DeviceMemoryBase;
using se::dnn::AlgorithmDesc;
using std::optional;
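// Allocates scratch space for autotuned convolution runs, keeping all
// buffers alive for the allocator's lifetime. The limit comes from
// TF_CUDNN_WORKSPACE_LIMIT_IN_MB (default 4096 MB).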
class ScratchAllocator : public se::ScratchAllocator {
public:
ScratchAllocator(int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
int64_t GetMemoryLimitInBytes() override {
return ScratchAllocator::GetDefaultMemoryLimitInBytes();
}
int64_t TotalAllocatedBytes() { return total_allocated_bytes_; }
static int64_t GetDefaultMemoryLimitInBytes() {
int64_t value;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
1LL << 12, &value));
return value * (1LL << 20);
}
absl::StatusOr<se::DeviceMemory<uint8_t>> AllocateBytes(
int64_t byte_size) override;
template <typename T>
absl::StatusOr<se::DeviceMemory<T>> Allocate(int64_t num_elements) {
TF_ASSIGN_OR_RETURN(se::DeviceMemory<uint8_t> bytes,
AllocateBytes(num_elements * sizeof(T)));
return se::DeviceMemory<T>(bytes);
}
private:
const int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
std::vector<se::OwningDeviceMemory> allocated_buffers_;
int64_t total_allocated_bytes_ = 0;
};
absl::StatusOr<se::DeviceMemory<uint8_t>> ScratchAllocator::AllocateBytes(
int64_t byte_size) {
CHECK_GE(byte_size, 0) << "byte_size must be positive.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
TF_ASSIGN_OR_RETURN(se::OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_, byte_size,
false));
total_allocated_bytes_ += byte_size;
se::DeviceMemoryBase buffer_addr = *allocated_buffer;
allocated_buffers_.push_back(std::move(allocated_buffer));
return se::DeviceMemory<uint8_t>(buffer_addr);
}
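// Enumerates the candidate cuDNN runners for a convolution config,
// dispatching on its kind: fused (bias/activation), graph-API, or plain
// forward/backward convolutions.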
absl::StatusOr<std::vector<GenericConvRunner>> GetAlgorithms(
const GpuConvConfig& config, se::Stream* stream, bool use_cudnn_frontend,
bool use_fallback, const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config.output_type));
se::StreamExecutor* stream_exec = stream->parent();
std::vector<GenericConvRunner> result;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
switch (kind) {
default:
return Internal("Unknown ConvolutionKind %d", kind);
case se::dnn::ConvolutionKind::FORWARD_BIAS_ACTIVATION: {
if (!config.fusion) {
return Internal(
"GpuConvConfig had fusion ConvolutionKind but no FusionConfig.");
}
std::vector<std::unique_ptr<const se::dnn::FusedConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetFusedConvolveRunners(
use_cudnn_frontend,
se::dnn::ConvolutionKind::FORWARD, input_type,
BiasTypeForInputType(input_type), output_type,
config.conv_result_scale,
config.fusion->side_input_scale,
config.fusion->leakyrelu_alpha, stream,
config.input_descriptor, config.filter_descriptor,
config.bias_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, config.fusion->mode, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::FusedConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD_GRAPH: {
std::vector<std::unique_ptr<const se::dnn::GraphConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetGraphConvolveRunners(
kind, input_type, output_type, stream, config.input_descriptor,
config.filter_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, numeric_options, &runners, config.serialized_graph));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::GraphConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD:
case se::dnn::ConvolutionKind::BACKWARD_DATA:
case se::dnn::ConvolutionKind::BACKWARD_FILTER: {
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
use_cudnn_frontend, kind, input_type, output_type, stream,
config.input_descriptor,
DeviceMemoryBase(nullptr),
config.filter_descriptor,
DeviceMemoryBase(nullptr),
config.output_descriptor,
DeviceMemoryBase(nullptr), config.conv_desc,
use_fallback, nullptr, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
}
return result;
}
absl::StatusOr<std::vector<std::unique_ptr<const se::dnn::ConvRunner>>>
GetMIOpenAlgorithms(const HloCustomCallInstruction* instr,
absl::Span<se::DeviceMemoryBase> operand_buffers,
absl::Span<se::DeviceMemoryBase> result_buffers,
se::StreamExecutor* stream_exec,
ScratchAllocator* scratch_allocator, se::Stream* stream,
const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType dtype,
GetDNNDataTypeFromPrimitiveType(config.output_type));
TF_ASSIGN_OR_RETURN(
GpuConvParams params,
GetGpuConvParams(config, operand_buffers, result_buffers));
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
false, kind, dtype, dtype, stream,
params.config->input_descriptor, params.input_buf,
params.config->filter_descriptor, params.filter_buf,
params.config->output_descriptor, params.output_buf,
params.config->conv_desc,
false, scratch_allocator, numeric_options,
&runners));
return runners;
}
std::string NumBytesToString(int64_t bytes) {
return absl::StrCat(tsl::strings::HumanReadableNumBytes(bytes), " (", bytes,
"B)");
}
CudnnVersion GetCudnnVersion(se::StreamExecutor* stream_executor) {
se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(stream_executor);
CudnnVersion cudnn_version;
cudnn_version.set_major(version.major_version());
cudnn_version.set_minor(version.minor_version());
cudnn_version.set_patch(version.patch());
return cudnn_version;
}
ComputeCapability GetComputeCapability(se::StreamExecutor* stream_executor) {
ComputeCapability cc;
se::CudaComputeCapability se_cc =
stream_executor->GetDeviceDescription().cuda_compute_capability();
cc.set_major(se_cc.major);
cc.set_minor(se_cc.minor);
return cc;
}
void PrintPlatformInfo(const se::Stream* stream) {
auto* se = stream->parent();
const auto& desc = se->GetDeviceDescription();
LOG(ERROR) << "Device: " << desc.name();
LOG(ERROR) << "Platform: " << desc.platform_version();
LOG(ERROR) << "Driver: " << desc.driver_version();
LOG(ERROR) << "Runtime: " << desc.runtime_version();
auto dnn_version = GetDnnVersionInfo(se);
if (dnn_version.ok()) {
auto v = dnn_version.value();
LOG(ERROR) << "cudnn version: " << v.major_version() << "."
<< v.minor_version() << "." << v.patch();
}
}
absl::StatusOr<bool> CheckRedzones(const se::RedzoneAllocator& allocator,
se::Stream* stream, absl::string_view name,
std::string_view instr_str,
AutotuneResult* result) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("CudnnConvAlgorithmPicker checking redzones",
2);
using RedzoneCheckStatus = se::RedzoneAllocator::RedzoneCheckStatus;
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus redzone_check,
allocator.CheckRedzones());
if (redzone_check.ok()) {
return true;
}
auto* fail = result->mutable_failure();
fail->set_kind(AutotuneResult::REDZONE_MODIFIED);
*fail->mutable_msg() = redzone_check.RedzoneFailureMsg();
fail->set_buffer_address(
reinterpret_cast<uint64_t>(redzone_check.user_buffer_address));
LOG(ERROR) << absl::StreamFormat(
"Detected cudnn out-of-bounds write in conv %s buffer! This is likely a "
"cudnn bug. We will skip this algorithm in the future, but your GPU "
"state may already be corrupted, leading to incorrect results. Within "
"Google, no action is needed on your part. Outside of Google, please "
"ensure you're running the latest version of cudnn. If that doesn't fix "
"the problem, please file a bug with this full error message and we'll "
"contact nvidia.",
name);
LOG(ERROR) << redzone_check.RedzoneFailureMsg();
LOG(ERROR) << "HloInstruction " << instr_str;
PrintPlatformInfo(stream);
return false;
}
}
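// Autotune level >= 2 initializes conv buffers with random data; level >= 4
// additionally enables correctness checks (redzones and reference
// comparison).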
bool ShouldInitConvData(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 2;
}
bool ShouldCheckConv(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 4;
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithm(
const HloCustomCallInstruction* instr) {
return AutotunerUtil::Autotune(
instr, config_, [&] { return PickBestAlgorithmNoCache(instr); });
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr) {
if (config_.IsDeviceless()) {
AutotuneResult result;
result.mutable_algorithm()->set_algo_id(-1);
return result;
}
se::StreamExecutor* stream_exec = config_.GetExecutor();
absl::MutexLock lock(&GetGpuMutex(stream_exec));
if (!stream_exec->SynchronizeAllActivity()) {
return Internal(
"Failed to synchronize GPU for autotuning conv instruction");
}
absl::StatusOr<AutotuneResult> result_or(Internal("Unknown platform."));
se::Platform::Id platform_id = stream_exec->GetPlatform()->id();
if (platform_id == se::rocm::kROCmPlatformId) {
result_or = PickBestAlgorithmNoCacheRocm(instr);
} else if (platform_id == se::cuda::kCudaPlatformId) {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
result_or = PickBestAlgorithmNoCacheCuda(instr);
#endif
}
return result_or;
}
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
absl::StatusOr<GpuConvAlgorithmPicker::AutotuneRuntimeArguments>
GpuConvAlgorithmPicker::AutotuneRuntimeArguments::FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options) {
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*instr, config, debug_options,
RedzoneBuffers::kAllInputsOutputsNoScratch));
std::string canonical_hlo(
AutotuneCacheKey(config.GetExecutor()->GetDeviceDescription().model_str(),
*instr)
.GetHlo());
TF_ASSIGN_OR_RETURN(GpuConvConfig gpu_conv_config, GetGpuConvConfig(instr));
GpuConvAlgorithmPicker::AutotuneRuntimeArguments runtime_arguments = {
instr->GetModule()->config(),
std::move(rz_buffers),
std::move(gpu_conv_config),
{canonical_hlo}};
return runtime_arguments;
}
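// Half-open version interval [begin, end) over (major, minor, patch) tuples,
// used to denylist conv algorithms for specific cuDNN releases. Membership is
// tested with lexicographic tuple comparison.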
struct CudnnVersionRange {
using TupleVersion = std::tuple<int, int, int>;
TupleVersion begin;
TupleVersion end;
bool IsInRange(const CudnnVersion& other) const {
TupleVersion other_version{other.major(), other.minor(), other.patch()};
return begin <= other_version && other_version < end;
}
CudnnVersionRange(const CudnnVersion& begin, const CudnnVersion& end)
: begin(begin.major(), begin.minor(), begin.patch()),
end(end.major(), end.minor(), end.patch()) {}
CudnnVersionRange(const TupleVersion& begin, const TupleVersion& end)
: begin(begin), end(end) {}
};
struct ComputeCapabilityRange {
using TupleComputeCapability = std::tuple<int, int>;
TupleComputeCapability begin;
TupleComputeCapability end;
bool IsInRange(const ComputeCapability& other) const {
TupleComputeCapability other_cc{other.major(), other.minor()};
return begin <= other_cc && other_cc < end;
}
};
struct DisabledAlgorithm {
CudnnVersionRange cudnn_version_range;
ComputeCapabilityRange compute_capability_range;
int algo_id;
};
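// Denylist of known-buggy algorithms: here, algorithm 14 is disabled for
// cuDNN versions [9.0.0, 10.0.0) on compute capabilities [6.0, 8.0).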
static const DisabledAlgorithm kDisabledAlgorithms[] = {
{{{9, 0, 0}, {10, 0, 0}},
{{6, 0}, {8, 0}},
14}};
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::AutotuneOneConvRunner(
GenericConvRunner* const runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments) {
auto alg = runner->ToAlgorithmDesc();
se::StreamExecutor* stream_exec = config_.GetExecutor();
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
auto make_failure = [&alg](AutotuneResult::FailureKind kind,
absl::string_view msg) {
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.mutable_failure()->set_kind(kind);
    result.mutable_failure()->set_msg(msg.data(), msg.size());
return result;
};
AlgorithmDesc alg_key(alg.algo_id(), alg.tensor_ops_enabled(), std::nullopt);
std::string instr_str = instruction_info.has_value()
? std::string(instruction_info->GetHlo())
: "<unknown>";
for (const auto& disabled_algo : kDisabledAlgorithms) {
if (disabled_algo.cudnn_version_range.IsInRange(
GetCudnnVersion(stream_exec)) &&
disabled_algo.compute_capability_range.IsInRange(
GetComputeCapability(stream_exec)) &&
disabled_algo.algo_id == alg.algo_id()) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
}
if (absl::c_linear_search(disabled_algos, alg_key)) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
GpuConvConfig config = runtime_arguments.gpu_conv_config;
auto activation_mode =
config.fusion ? config.fusion->mode : se::dnn::ActivationMode::kNone;
if (!alg.is_cudnn_frontend() &&
config.kind == CudnnConvKind::kForwardActivation &&
activation_mode == se::dnn::ActivationMode::kNone &&
alg.algo_id() != CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for implicit RELU.");
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator scratch_allocator,
AutotunerUtil::CreateRedzoneAllocator(
config_, runtime_arguments.hlo_module_config.debug_options()));
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for " << instr_str;
SlowOperationAlarm alarm(absl::Seconds(1), [&] {
return absl::StrFormat(
"Trying algorithm %s for conv %s is taking a while...", alg.ToString(),
instr_str);
});
std::optional<size_t> workspace_size =
runner->ToAlgorithmDesc().workspace_size();
if (!workspace_size) {
return make_failure(AutotuneResult::UNKNOWN,
"Internal error: missing workspace size from "
"OpRunner::ToAlgorithmDesc()");
}
auto scratch_or = scratch_allocator.AllocateBytes(*workspace_size);
if (!scratch_or.ok()) {
return make_failure(AutotuneResult::DISQUALIFIED,
absl::StrCat("Scratch allocation failed: ",
scratch_or.status().ToString()));
}
se::DeviceMemoryBase scratch_memory = scratch_or.value();
RunConvOptions options;
options.runner_cache = runner;
float max_time = 0;
float min_time = std::numeric_limits<float>::max();
absl::Status launch_status;
std::vector<se::DeviceMemoryBase> operand_buffers =
runtime_arguments.rz_buffers.input_buffers();
std::vector<se::DeviceMemoryBase> result_buffers =
runtime_arguments.rz_buffers.output_buffers();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
options.profile_result = &profile_result;
profile_result.set_warmup_run_executed(true);
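  // Profiling loop: re-run the conv up to kMaxIter times, tracking the min
  // and max elapsed times, and stop early once the latest measurement is
  // within kThreshold (5%) of the best time seen so far. The launch above
  // served as the warmup run.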
constexpr int kMaxIter = 10;
int num_iters = 0;
for (; num_iters < kMaxIter && launch_status.ok(); ++num_iters) {
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
if (!profile_result.is_valid()) {
break;
}
float old_min_time = min_time;
min_time = std::min(min_time, profile_result.elapsed_time_in_ms());
max_time = std::max(max_time, profile_result.elapsed_time_in_ms());
constexpr float kThreshold = 0.05f;
if (std::abs(profile_result.elapsed_time_in_ms() - old_min_time) /
old_min_time <
kThreshold) {
break;
}
}
if (!launch_status.ok()) {
VLOG(5) << "Launch failed: " << launch_status;
return make_failure(
AutotuneResult::DISQUALIFIED,
absl::StrCat("Profiling failure on cuDNN engine ", alg.ToString(), ": ",
launch_status.ToString()));
}
if (!profile_result.is_valid()) {
VLOG(5) << "Launch succeeded but profile result is invalid.";
return make_failure(
AutotuneResult::UNKNOWN,
absl::StrCat("Launch succeeded but profile result is invalid, "
"with cuDNN engine ",
alg.ToString(), ": ", launch_sta | #include "xla/service/gpu/conv_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GpuConvAlgorithmPickerTest : public HloTestBase {
public:
GpuConvAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
};
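// Runs the algorithm picker once, perturbs the scratch_bytes recorded in the
// serialized autotune results, reloads them, and verifies that recompiling
// the same HLO picks up the modified workspace size. On cuDNN 9.x with sm_70
// it also checks that the denylisted algorithm 14 is not selected.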
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[3,56,56,16]{2,1,0,3} parameter(0)
%arg1 = f32[3,3,3,64]{2,1,0,3} parameter(1)
ROOT %conv = f32[54,54,16,64]{1,0,3,2} convolution(%arg0, %arg1), window={size=3x3}, dim_labels=f01b_i01o->01bf
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_scratch_bytes = result.scratch_bytes();
int64_t new_scratch_bytes = old_scratch_bytes + 1;
result.set_scratch_bytes(new_scratch_bytes);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK(RunHloPass(TupleSimplifier(), m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&conv))));
EXPECT_THAT(
conv->shape(),
GmockMatch(m::Shape().WithSubshape(
{1}, m::Shape().WithElementType(U8).WithDims({new_scratch_bytes}))));
TF_ASSERT_OK_AND_ASSIGN(auto dnn_version, GetDnnVersionInfo(stream_exec));
if (dnn_version.major_version() >= 9 && dnn_version.major_version() < 10 &&
std::holds_alternative<stream_executor::CudaComputeCapability>(cc) &&
std::get<stream_executor::CudaComputeCapability>(cc).major == 7 &&
std::get<stream_executor::CudaComputeCapability>(cc).minor == 0) {
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->has_cudnn_conv_backend_config() &&
conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.algorithm()
.algo_id() != 14);
}
}
}
} | 2,077 |
#ifndef XLA_SERVICE_GPU_SCATTER_SLICE_SIMPLIFIER_H_
#define XLA_SERVICE_GPU_SCATTER_SLICE_SIMPLIFIER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
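// Replaces scatters which are only fed into truncation slices (possibly
// through elementwise ops and get-tuple-elements) with a scatter on the
// already-sliced operands. Sketch of the rewrite, mirroring the Scatter1D
// unit test:
//
//   %scatter = f32[9] scatter(%operands, %indices, %updates), ...
//   ROOT %slice = f32[8] slice(%scatter), slice={[0:8]}
//
// becomes
//
//   %sliced = f32[8] slice(%operands), slice={[0:8]}
//   ROOT %scatter = f32[8] scatter(%sliced, %indices, %updates), ...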
class ScatterSliceSimplifier : public HloModulePass {
public:
absl::string_view name() const override { return "scatter-slice-simplifier"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/scatter_slice_simplifier.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsValidIntermediaryUser(const HloInstruction* instruction) {
return instruction->IsElementwise() ||
instruction->opcode() == HloOpcode::kGetTupleElement;
}
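// Matches the pattern where every (transitive) user of a scatter is either an
// elementwise op, a get-tuple-element, or a slice that merely truncates
// dimensions. On a match, InferShape() returns the truncated result shape the
// scatter can be shrunk to; otherwise it returns std::nullopt.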
class ScatterSliceMatcher {
public:
explicit ScatterSliceMatcher(const HloScatterInstruction* scatter)
: scatter_(scatter),
operand_dimensions_(
scatter->scatter_operands()[0]->shape().dimensions()),
result_dimensions_(operand_dimensions_.begin(),
operand_dimensions_.end()) {}
std::optional<Shape> InferShape() {
VLOG(10) << "Evaluating scatter " << scatter_->name();
if (!AreAllUsersValid(scatter_)) {
return std::nullopt;
}
std::vector<Shape> result_shapes;
absl::c_transform(scatter_->scatter_operands(),
std::back_inserter(result_shapes),
[&](const HloInstruction* op) {
return ShapeUtil::MakeShape(op->shape().element_type(),
result_dimensions_);
});
return ShapeUtil::MakeMaybeTupleShape(result_shapes);
}
private:
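  // A slice qualifies as a pure truncation if it starts at 0 with stride 1 in
  // every dimension. A dimension may only be truncated if it is not one of
  // the scatter's update window dimensions, and all matched slices must agree
  // on the truncated size.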
bool UpdateDimensions(const HloSliceInstruction* slice) {
int64_t rank = slice->shape().rank();
for (int64_t i = 0; i < rank; ++i) {
if (slice->slice_starts(i) != 0 || slice->slice_strides(i) != 1) {
return false;
}
if (slice->slice_limits(i) != result_dimensions_[i]) {
if (result_dimensions_[i] != operand_dimensions_[i]) {
return false;
}
auto& update_window_dims =
scatter_->scatter_dimension_numbers().update_window_dims();
if (absl::c_binary_search(update_window_dims, i)) {
return false;
}
result_dimensions_[i] = slice->slice_limits(i);
VLOG(10) << "Dimension " << i << " truncated to size "
<< result_dimensions_[i];
}
}
return true;
}
bool IsUserValid(const HloInstruction* op) {
VLOG(10) << "Visiting user " << op->name();
if (auto* slice = DynCast<HloSliceInstruction>(op)) {
return UpdateDimensions(slice);
}
bool is_valid = visited_set_.contains(op) ||
(IsValidIntermediaryUser(op) && AreAllUsersValid(op));
if (is_valid) {
visited_set_.emplace(op);
}
return is_valid;
}
bool AreAllUsersValid(const HloInstruction* op) {
if (op->user_count() == 0) {
return !op->IsRoot();
}
return absl::c_all_of(op->users(), [this](const HloInstruction* user) {
return IsUserValid(user);
});
}
const HloScatterInstruction* scatter_;
absl::flat_hash_set<const HloInstruction*> visited_set_;
absl::Span<const int64_t> operand_dimensions_;
DimensionVector result_dimensions_;
};
HloInstruction* CreateSliceFrom(HloInstruction* operand, const Shape& shape) {
std::vector<int64_t> start_indices(shape.rank(), 0);
std::vector<int64_t> limit_indices(shape.rank());
std::vector<int64_t> strides(shape.rank(), 1);
for (int64_t i = 0; i < shape.rank(); ++i) {
limit_indices[i] = shape.dimensions(i);
}
return operand->AddInstruction(HloInstruction::CreateSlice(
shape, operand, start_indices, limit_indices, strides));
}
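// Rebuilds `scatter` with each operand sliced down to `shape` (per tuple
// element for multi-output scatters), keeping the indices, updates, update
// computation, and dimension numbers unchanged.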
HloInstruction* CreateScatterFrom(HloScatterInstruction* scatter,
const Shape& shape) {
std::vector<HloInstruction*> operands(scatter->scatter_operand_count());
for (int64_t i = 0; i < operands.size(); ++i) {
operands[i] =
CreateSliceFrom(scatter->scatter_operands()[i],
shape.IsTuple() ? shape.tuple_shapes(i) : shape);
}
return scatter->AddInstruction(HloInstruction::CreateScatter(
shape, absl::MakeSpan(operands), scatter->scatter_indices(),
scatter->scatter_updates(), scatter->called_computations()[0],
scatter->scatter_dimension_numbers(), scatter->indices_are_sorted(),
scatter->unique_indices()));
}
class ScatterSliceSimplifierVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleScatter(HloInstruction* instruction) override {
auto* scatter = Cast<HloScatterInstruction>(instruction);
std::optional<Shape> result_shape =
ScatterSliceMatcher(scatter).InferShape();
if (!result_shape.has_value()) {
return absl::OkStatus();
}
VLOG(2) << "Matched scatter " << scatter->name() << " with shape "
<< scatter->shape().ToString() << ", inferred result shape "
<< result_shape->ToString() << " (from the slice users)";
HloInstruction* new_scatter = CreateScatterFrom(scatter, *result_shape);
return ReplaceAllUsersRecursive(scatter, new_scatter);
}
private:
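  // Replaces all users of `old_instruction` with (clones rewritten onto)
  // `new_instruction`, recursing through intermediary users. `replacements_`
  // memoizes operands that have already been rewritten so diamond-shaped user
  // graphs are handled correctly.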
absl::Status ReplaceAllUsersRecursive(HloInstruction* old_instruction,
HloInstruction* new_instruction) {
replacements_[old_instruction] = new_instruction;
std::vector<HloInstruction*> users = old_instruction->users();
for (HloInstruction* user : users) {
if (user->parent() == nullptr) {
VLOG(3) << "Skipping user " << user->name() << " (already replaced)";
continue;
}
TF_RETURN_IF_ERROR(ReplaceUserRecursive(user, new_instruction));
}
return absl::OkStatus();
}
absl::Status ReplaceUserRecursive(HloInstruction* user,
HloInstruction* operand) {
VLOG(3) << "Replacing scatter user " << user->name();
if (user->opcode() == HloOpcode::kSlice) {
return ReplaceInstruction(user, operand);
}
HloInstruction* new_user = nullptr;
if (user->IsElementwise()) {
auto new_shape = [operand](HloInstruction* from) {
return ShapeUtil::MakeShape(from->shape().element_type(),
operand->shape().dimensions());
};
std::vector<HloInstruction*> new_operands;
absl::c_transform(user->operands(), std::back_inserter(new_operands),
[&](HloInstruction* op) {
auto it = replacements_.find(op);
return it != replacements_.end()
? it->second
: CreateSliceFrom(op, new_shape(op));
});
new_user = user->AddInstruction(
user->CloneWithNewOperands(new_shape(user), new_operands));
} else {
auto* gte = Cast<HloGetTupleElementInstruction>(user);
TF_ASSIGN_OR_RETURN(new_user,
MakeGetTupleElementHlo(operand, gte->tuple_index(),
&user->metadata()));
}
return ReplaceAllUsersRecursive(user, new_user);
}
absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements_;
};
}
absl::StatusOr<bool> ScatterSliceSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return ScatterSliceSimplifierVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/gpu/scatter_slice_simplifier.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ScatterSliceSimplifierTest = HloTestBase;
TEST_F(ScatterSliceSimplifierTest, Scatter1D) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[8] slice(%scatter), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0),
m::Parameter(1))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, Scatter3D) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[2] parameter(0)
%updates = f32[2,4,4] parameter(1)
%operands = f32[5,4,4] constant(0)
%scatter = f32[5,4,4] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[4,4,4] slice(%scatter), slice={[0:4], [0:4], [0:4]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0),
m::Parameter(1))
.WithShape(F32, {4, 4, 4})));
}
TEST_F(ScatterSliceSimplifierTest, ScatterMultiOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32_add_F16 {
%lhs.0 = f32[] parameter(0)
%rhs.0 = f32[] parameter(2)
%add.0 = f32[] add(%lhs.0, %rhs.0)
%lhs.1 = f16[] parameter(1)
%rhs.1 = f16[] parameter(3)
%add.1 = f16[] add(%lhs.1, %rhs.1)
ROOT %tuple = (f32[], f16[]) tuple(%add.0, %add.1)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates.0 = f32[4] parameter(1)
%updates.1 = f16[4] parameter(2)
%operands.0 = f32[9] constant(0)
%operands.1 = f16[9] constant(0)
%scatter = (f32[9], f16[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_add_F16
%gte.0 = f32[9] get-tuple-element(%scatter), index=0
%slice.0 = f32[8] slice(%gte.0), slice={[0:8]}
%gte.1 = f16[9] get-tuple-element(%scatter), index=1
%slice.1 = f16[8] slice(%gte.1), slice={[0:8]}
ROOT %tuple = (f32[8], f16[8]) tuple(%slice.0, %slice.1)
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()),
m::Parameter(0), m::Parameter(1), m::Parameter(2));
Shape expected_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F16, {8})});
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(expected_scatter),
m::GetTupleElement(expected_scatter))
.WithShapeEqualTo(&expected_shape)));
}
TEST_F(ScatterSliceSimplifierTest, NotMatching) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
slice_not_truncation {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[8] slice(%scatter), slice={[1:9]}
}
slice_with_stride {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[4] slice(%scatter), slice={[0:8:2]}
}
scatter_multiple_users {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%slice = f32[8] slice(%scatter), slice={[0:8]}
ROOT %tuple = (f32[9], f32[8]) tuple(%scatter, %slice)
}
scatter_incompatible_slices {
%indices = s32[2] parameter(0)
%updates = f32[2,4] parameter(1)
%operands = f32[4,4] constant(0)
%scatter = f32[4,4] scatter(%operands, %indices, %updates), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%slice.0 = f32[3,4] slice(%scatter), slice={[0:3], [0:4]}
%slice.1 = f32[4,3] slice(%scatter), slice={[0:4], [0:3]}
ROOT %tuple = (f32[3,4], f32[4,3]) tuple(%slice.0, %slice.1)
}
slice_not_found {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[8] constant(0)
%scatter = f32[8] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %exp = f32[8] exponential(%scatter)
}
slice_update_dimensions {
%indices = s32[10] parameter(0)
%updates = f32[10,1,128] parameter(1)
%operands = f32[100,128] constant(0)
%scatter = f32[100,128] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
ROOT %slice = f32[100,64] slice(%scatter), slice={[0:100], [0:64]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_FALSE(RunHloPass(&test_pass, module.get()).value());
}
TEST_F(ScatterSliceSimplifierTest, IntermediaryUsers) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%unary = f32[9] abs(%scatter)
%slice.0 = f32[8] slice(%unary), slice={[0:8]}
%binary = f32[9] maximum(%scatter, %operands)
%slice.1 = f32[8] slice(%binary), slice={[0:8]}
ROOT %tuple = (f32[8], f32[8]) tuple(%slice.0, %slice.1)
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
Shape expected_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F32, {8})});
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Abs(expected_scatter),
m::Maximum(expected_scatter, m::Slice(m::Constant())))
.WithShapeEqualTo(&expected_shape)));
}
TEST_F(ScatterSliceSimplifierTest, IntermediaryChain) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%elementwise.0 = f32[9] abs(%scatter)
%elementwise.1 = f32[9] exponential(%elementwise.0)
%elementwise.2 = f32[9] add(%elementwise.0, %elementwise.1)
ROOT %result = f32[8] slice(%elementwise.2), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Abs(expected_scatter),
m::Exp(m::Abs(expected_scatter)))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, DiamondShape) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32_mul_F32 {
%lhs.0 = f32[] parameter(0)
%rhs.0 = f32[] parameter(2)
%add.0 = f32[] add(%lhs.0, %rhs.0)
%lhs.1 = f32[] parameter(1)
%rhs.1 = f32[] parameter(3)
%mul.1 = f32[] multiply(%lhs.1, %rhs.1)
ROOT %tuple = (f32[], f32[]) tuple(%add.0, %mul.1)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates.0 = f32[4] parameter(1)
%updates.1 = f32[4] parameter(2)
%operands.0 = f32[9] constant(0)
%operands.1 = f32[9] constant(0)
%scatter = (f32[9], f32[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_mul_F32
%gte.0 = f32[9] get-tuple-element(%scatter), index=0
%gte.1 = f32[9] get-tuple-element(%scatter), index=1
%consumer = f32[9] add(%gte.0, %gte.1)
ROOT %slice = f32[8] slice(%consumer), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()),
m::Parameter(0), m::Parameter(1), m::Parameter(2));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::GetTupleElement(expected_scatter),
m::GetTupleElement(expected_scatter))
.WithShape(F32, {8})));
}
TEST_F(ScatterSliceSimplifierTest, ElementwiseSelect) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%add_F32 {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY main {
%indices = s32[4] parameter(0)
%updates = f32[4] parameter(1)
%operands = f32[9] constant(0)
%scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32
%pred_ = pred[9] parameter(2)
%select = f32[9] select(%pred_, %scatter, %operands)
ROOT %slice = f32[8] slice(%select), slice={[0:8]}
}
)")
.value();
ScatterSliceSimplifier test_pass;
ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value());
auto expected_scatter =
m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Select(m::Slice(m::Parameter(2)), expected_scatter,
m::Slice(m::Constant()))
.WithShape(F32, {8})));
}
}
} | 2,078 |
#ifndef XLA_SERVICE_GPU_GPU_COMPILER_H_
#define XLA_SERVICE_GPU_GPU_COMPILER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/Module.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/buffer_sharing.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/llvm_compiler.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
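// Abstract base class for XLA's GPU backends (subclassed by the CUDA and ROCm
// compilers): it runs the common HLO optimization pipelines and delegates
// target-specific code generation to CompileTargetBinary().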
class GpuCompiler : public LLVMCompiler {
public:
GpuCompiler(se::Platform::Id platform_id, const char* target_triple,
const char* data_layout);
using LLVMCompiler::Compile;
absl::StatusOr<std::unique_ptr<HloModule>> RunHloPasses(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) override;
absl::StatusOr<std::unique_ptr<Executable>> RunBackend(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
const CompileOptions& options) override;
absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
AotCompilationOptions const& options) override;
se::Platform::Id PlatformId() const override { return platform_id_; }
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override;
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
LoadAotCompilationResult(const std::string& serialized_aot_result) override;
static absl::StatusOr<std::unique_ptr<AotCompilationResult>>
LoadAotCompilationResultStatic(const std::string& serialized_aot_result);
absl::StatusOr<std::unique_ptr<AotCompilationResult>> Export(
Executable* executable) const override;
absl::Status RunPostSchedulingPipelines(
HloModule* module, int64_t scheduler_mem_limit,
const se::DeviceDescription& gpu_device_info) const;
std::string target_triple() const { return target_triple_; }
std::string data_layout() const { return data_layout_; }
const char* GetDataLayout() const { return data_layout_; }
const char* GetTargetTriple() const { return target_triple_; }
int64_t GetPointerSize() const { return pointer_size_; }
static absl::StatusOr<Compiler::TargetConfig> GetTargetConfig(
const Compiler::CompileOptions& options, const DebugOptions& debug_opts,
se::StreamExecutor* executor);
virtual HloDataflowAnalysis::CanShareBuffer GetCanShareBuffer() const {
return &FusionCanShareBufferHint;
}
virtual int32_t GetToolkitVersion() const = 0;
virtual absl::StatusOr<bool> CanUseLinkModules(
const HloModuleConfig& config) {
return false;
}
protected:
struct BackendCompileResult {
std::string asm_text;
std::vector<uint8_t> binary;
Thunk::BinaryMap dnn_compiled_graphs;
};
virtual absl::Status OptimizeHloPostLayoutAssignment(
HloModule* hlo_module, se::StreamExecutor* stream_exec,
const CompileOptions& options, const TargetConfig& gpu_target_config,
tsl::thread::ThreadPool* thread_pool);
virtual bool RequiresCollectiveScheduleLinearizer(
const HloModule* module, se::StreamExecutor* stream_exec) {
return false;
}
virtual absl::Status AddConvAndGemmAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool) {
return absl::OkStatus();
}
virtual absl::Status AddGemmFusionAutotuningPasses(
HloPassPipeline* pipeline, HloModule* hlo_module,
AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store) {
return absl::OkStatus();
}
virtual absl::Status AddCustomKernelReplacementPasses(
HloPassPipeline* pipeline, const DebugOptions& debug_options) {
return absl::OkStatus();
}
virtual absl::Status RunCudnnFusionCompilerPass(
HloModule* module, se::StreamExecutor* stream_exec,
Thunk::BinaryMap* dnn_compiled_graphs) {
return absl::OkStatus();
}
AlgebraicSimplifierOptions GetAlgebraicSimplifierOptions(
const HloModuleConfig& config);
private:
struct CompileResultWithMetadata {
BackendCompileResult backend_result;
CompileModuleResults compile_module_results;
};
absl::StatusOr<CompileResultWithMetadata> CompileToBackendResult(
HloModule* module, llvm::LLVMContext* llvm_context,
se::StreamExecutor* executor, const CompileOptions& options,
const se::DeviceDescription& gpu_device_info);
absl::StatusOr<BackendCompileResult> CompileAndLink(
const HloModuleConfig& module_config,
CompileModuleResults& compile_module_results,
se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,
const CompileOptions& options, const HloModule* debug_module);
absl::StatusOr<BackendCompileResult> CompileSingleModule(
const HloModuleConfig& module_config,
se::GpuComputeCapability gpu_version, const HloModule* debug_module,
llvm::Module* llvm_module, bool relocatable,
const CompileOptions& options, std::optional<int> shard_number);
absl::Status LoadAutotuneResultsFromFile(const DebugOptions& debug_options);
absl::Status SerializeAutotuneResultsToFile(
const DebugOptions& debug_options);
absl::Status RunPreSchedulingPasses(HloModule* module,
se::StreamExecutor* stream_exec);
absl::Status OptimizeHloModule(HloModule* hlo_module,
se::StreamExecutor* stream_exec,
const CompileOptions& options,
const TargetConfig& gpu_target_config);
virtual absl::Status OptimizeHloConvolutionCanonicalization(
HloModule* hlo_module, se::GpuComputeCapability gpu_version,
se::dnn::VersionInfo dnn_version,
se::DeviceMemoryAllocator* device_allocator) = 0;
virtual absl::StatusOr<BackendCompileResult> CompileTargetBinary(
const HloModuleConfig& module_config, llvm::Module* llvm_module,
se::GpuComputeCapability gpu_version, bool relocatable,
const HloModule* debug_module, const CompileOptions& options) = 0;
absl::Status PrepareHloModuleForIrEmitting(HloModule* hlo_module);
virtual absl::StatusOr<std::vector<uint8_t>> LinkModules(
se::StreamExecutor* stream_exec,
std::vector<std::vector<uint8_t>> modules,
const DebugOptions& debug_options) {
return Unimplemented("LinkModules is not implemented.");
}
se::Platform::Id platform_id_;
const char* target_triple_;
const char* data_layout_;
const int64_t pointer_size_;
GpuCompiler(const GpuCompiler&) = delete;
GpuCompiler& operator=(const GpuCompiler&) = delete;
};
}
}
#endif
#include "xla/service/gpu/gpu_compiler.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SplitModule.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/maybe_owning.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/all_gather_broadcast_reorder.h"
#include "xla/service/all_gather_combiner.h"
#include "xla/service/all_reduce_combiner.h"
#include "xla/service/all_reduce_contiguous.h"
#include "xla/service/all_reduce_folder.h"
#include "xla/service/all_reduce_promotion.h"
#include "xla/service/all_reduce_reassociate.h"
#include "xla/service/all_reduce_splitter.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/bitcast_dtypes_expander.h"
#include "xla/service/broadcast_canonicalizer.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_permute_decomposer.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/collectives_schedule_linearizer.h"
#include "xla/service/comparison_expander.h"
#include "xla/service/compiler.h"
#include "xla/service/conditional_canonicalizer.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "xla/service/convert_mover.h"
#include "xla/service/convolution_4d_expander.h"
#include "xla/service/convolution_pred_expander.h"
#include "xla/service/copy_insertion.h"
#include "xla/service/cpu_gpu_shape_verifier.h"
#include "xla/service/dot_decomposer.h"
#include "xla/service/dot_merger.h"
#include "xla/service/dump.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/dynamic_index_splitter.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/eigh_expander.h"
#include "xla/service/executable.h"
#include "xla/service/export_hlo.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/float_normalization.h"
#include "xla/service/float_support.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/algorithm_checker.h"
#include "xla/service/gpu/all_reduce_blueconnect.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/collective_permute_cycle_decomposer.h"
#include "xla/service/gpu/command_buffer_scheduling.h"
#include "xla/service/gpu/compile_module_to_llvm_ir.h"
#include "xla/service/gpu/conv_layout_normalization.h"
#include "xla/service/gpu/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/dot_dimension_sorter.h"
#include "xla/service/gpu/dot_operand_converter.h"
#include "xla/service/gpu/double_buffer_loop_unrolling.h"
#include "xla/service/gpu/dynamic_slice_fusion_rewriter.h"
#include "xla/service/gpu/execution_stream_assignment.h"
#include "xla/service/gpu/fusion_pipeline.h"
#include "xla/service/gpu/fusion_wrapper.h"
#include "xla/service/gpu/gemm_broadcast_folding_rewriter.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/service/gpu/gemm_rewriter.h"
#include "xla/service/gpu/gemv_rewriter.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/gpu/gpu_all_gather_optimizer.h"
#include "xla/service/gpu/gpu_async_collective_annotator.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/gpu_layout_assignment.h"
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include "xla/service/gpu/gpu_reduce_scatter_creator.h"
#include "xla/service/gpu/gpu_sanitize_constant_names.h"
#include "xla/service/gpu/gpu_scatter_expander.h"
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include "xla/service/gpu/gpu_windowed_einsum_handler.h"
#include "xla/service/gpu/hlo_fusion_stats.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_unnested.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/move_copy_to_users.h"
#include "xla/service/gpu/pipelined_p2p_rewriter.h"
#include "xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h"
#include "xla/service/gpu/reduction_degenerate_dim_remover.h"
#include "xla/service/gpu/reduction_dimension_grouper.h"
#include "xla/service/gpu/reduction_layout_normalizer.h"
#include "xla/service/gpu/reduction_splitter.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/rename_fusions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime_intrinsics.h"
#include "xla/service/gpu/scatter_slice_simplifier.h"
#include "xla/service/gpu/softmax_rewriter_triton.h"
#include "xla/service/gpu/stream_attribute_annotator.h"
#include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/topk_specializer.h"
#include "xla/service/gpu/topk_splitter.h"
#include "xla/service/gpu/tree_reduction_rewriter.h"
#include "xla/service/gpu/triton_fusion_numerics_verifier.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_computation_deduplicator.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_rematerialization.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_transfer_asyncifier.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/host_offloader.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/logistic_expander.h"
#include "xla/service/operand_upcaster.h"
#include "xla/service/optimization_barrier_expander.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/qr_expander.h"
#include "xla/service/real_imag_expander.h"
#include "xla/service/reduce_decomposer.h"
#include "xla/service/reduce_scatter_combiner.h"
#include "xla/service/reduce_scatter_reassociate.h"
#include "xla/service/reduce_window_rewriter.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/result_caster.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/service/sharding_remover.h"
#include "xla/service/simplify_fp_conversions.h"
#include "xla/service/slice_sinker.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/service/stochastic_convert_decomposer.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/service/topk_rewriter.h"
#include "xla/service/transpose_folding.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/service/zero_sized_hlo_elimination.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef PLATFORM_GOOGLE
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#endif
namespace xla {
namespace gpu {
namespace {
using MaybeOwningThreadPool = MaybeOwning<tsl::thread::ThreadPool>;
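// Returns a thread pool according to `parallelism`: 0 means use
// `default_thread_pool` (or create one with `default_parallelism` threads if
// none was provided), 1 means no thread pool (compile on the caller's
// thread), and any other value creates an owned pool with that many threads.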
MaybeOwningThreadPool CreateMaybeOwningThreadPool(
int parallelism, tsl::thread::ThreadPool* default_thread_pool,
int default_parallelism) {
CHECK_GE(parallelism, 0);
CHECK_GE(default_parallelism, 1);
CHECK(default_thread_pool == nullptr ||
default_thread_pool->CurrentThreadId() == -1);
auto create_thread_pool = [&](int num_threads) {
CHECK_GE(num_threads, 1);
return std::make_unique<tsl::thread::ThreadPool>(tsl::Env::Default(), "",
num_threads);
};
switch (parallelism) {
case 0:
if (default_thread_pool == nullptr && default_parallelism > 1) {
return MaybeOwningThreadPool(create_thread_pool(default_parallelism));
}
return MaybeOwningThreadPool(default_thread_pool);
case 1:
return MaybeOwningThreadPool(nullptr);
default:
return MaybeOwningThreadPool(create_thread_pool(parallelism));
}
}
absl::StatusOr<AutotuneConfig> GetAutotuneConfig(
se::StreamExecutor* stream_exec, const DebugOptions& debug_options,
const GpuCompiler::CompileOptions& options,
const Compiler::TargetConfig& gpu_target_config) {
if (stream_exec) {
return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},
debug_options};
}
return AutotuneConfig{
DevicelessConfig{gpu_target_config.device_description_str},
debug_options};
}
se::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {
return stream_exec->GetDeviceDescription().gpu_compute_capability();
}
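// AOT compilation result that serializes the optimized HLO module, buffer
// assignment, asm text, binary, and compiled cuDNN graphs into a
// CompilationResultProto. LoadExecutable() re-runs IR emission on the target
// device to reconstruct the thunk sequence for a GpuExecutable.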
class GpuThunkAotCompilationResult : public AotCompilationResult {
public:
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromModule(const HloModule* hlo_module,
const BufferAssignment* buffer_assignment,
std::string_view asm_text, absl::Span<const uint8_t> binary,
const Thunk::BinaryMap& dnn_compiled_graphs) {
CompilationResultProto proto;
*proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();
*proto.mutable_buffer_assignment() = buffer_assignment->ToProto();
proto.set_asm_text(std::string(asm_text));
proto.set_binary(binary.data(), binary.size());
proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),
dnn_compiled_graphs.cend());
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(hlo_module->Clone(),
std::move(proto)));
}
static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
FromString(const std::string& serialized) {
CompilationResultProto proto;
if (!proto.ParseFromString(serialized)) {
return Internal(
"Failed to parse serialized GpuThunkAotCompilationResult.");
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));
return std::unique_ptr<GpuThunkAotCompilationResult>(
new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));
}
absl::StatusOr<std::string> SerializeAsString() const override {
return proto_.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Executable>> LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const override;
const HloModule* optimized_module() const override { return module_.get(); }
std::unique_ptr<HloModule> consume_optimized_module() override {
return std::move(module_);
}
private:
GpuThunkAotCompilationResult(std::unique_ptr<HloModule> module,
CompilationResultProto proto)
: module_(std::move(module)), proto_(std::move(proto)) {}
std::unique_ptr<HloModule> module_;
CompilationResultProto proto_;
};
}
absl::StatusOr<std::unique_ptr<Executable>>
GpuThunkAotCompilationResult::LoadExecutable(
Compiler* compiler, const se::StreamExecutor* stream_exec) const {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> buffer_assignment,
BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),
compiler->BufferSizeBytesFunction(),
nullptr));
ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());
std::vector<uint8_t> binary(proto_.binary().begin(), proto_.binary().end());
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithId(compiler->PlatformId()));
std::string platform_name = platform->Name();
const se::DeviceDescription& gpu_device_info =
stream_exec->GetDeviceDescription();
mlir::DialectRegistry registry;
auto mlir_context = std::make_unique<mlir::MLIRContext>(registry);
llvm::LLVMContext llvm_context;
auto* gpu_compiler = dynamic_cast<GpuCompiler*>(compiler);
if (gpu_compiler == nullptr) {
return Internal("Compiler is not a GpuCompiler.");
}
auto llvm_module = std::make_unique<llvm::Module>("", llvm_context);
llvm_module->setTargetTriple(gpu_compiler->target_triple());
llvm_module->setDataLayout(gpu_compiler->data_layout());
IrEmitterContext ir_emitter_context(
hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,
platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),
nullptr,
false);
auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);
TF_RETURN_IF_ERROR(
ir_emitter->EmitHloComputation(hlo_module->entry_computation()));
std::vector<GpuExecutable::ConstantInfo> constants =
std::move(ir_emitter_context.constants());
TF_ASSIGN_OR_RETURN(auto output_info,
GetOutputInfo(*hlo_module, *buffer_assignment));
const Shape& output_shape = hlo_module->result_shape();
int64_t debug_buffer_assignment_show_max =
hlo_module->config()
.debug_options()
.xla_debug_buffer_assignment_show_max();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GpuExecutable> executable,
GpuExecutable::Create(GpuExecutable::Params{
proto_.asm_text(),
binary,
Thunk::BinaryMap(proto_.dnn_compiled_graphs().cbegin(),
proto_.dnn_compiled_graphs().cend()),
gpu_device_info.gpu_compute_capability(),
ir_emitter->ConsumeThunkSequence(),
std::move(constants),
std::move(output_info),
std::move(hlo_module->name()),
std::move(output_shape),
std::nullopt,
std::move(buffer_assignment),
debug_buffer_assignment_show_max,
std::move(hlo_module),
true}));
return executable;
}
GpuCompiler::GpuCompiler(se::Platform::Id platform_id,
const char* target_triple, const char* data_layout)
: platform_id_(platform_id),
target_triple_(target_triple),
data_layout_(data_layout),
pointer_size_(llvm::DataLayout(data_layout)
.getPointerSize(0 )) {}
namespace {
void AddHloVerifier(HloPassPipeline* pipeline, HloVerifierOpts&& opts = {},
bool debug_only = false) {
std::unique_ptr<TargetVerifierMetadata> verifier_metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
if (debug_only) {
pipeline->AddInvariantCheckerDebug<HloVerifier>(
std::move(verifier_metadata), "hlo verifier (debug)");
} else {
pipeline->AddInvariantChecker<HloVerifier>(std::move(verifier_metadata),
"hlo verifier");
}
}
void CheckNotScheduled(HloModule* hlo_module) {
if (hlo_module->has_schedule() &&
!hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {
LOG(WARNING) << "\nThe current HLO module " << hlo_module->name()
<< " is scheduled and optimized. \n"
<< "It is not expected to run optimization passes again.\n"
"Use a test method like RunAndCompareNoHloPasses() or "
<< "the xla_disable_all_hlo_passes flag.";
}
}
void LogDebugOptions(HloModule* hlo_module) {
XLA_VLOG_LINES(
1, absl::StrFormat("GpuCompilationEnvironment of hlo_module %s:\n%s",
hlo_module->name(),
hlo_module->config().debug_options().DebugString()));
}
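// Builds the algebraic simplifier options used before layout assignment:
// non-canonical dots are marked as unsupported, NaN propagation through
// min/max follows the fast-min-max flag, and conv operand swapping is
// disabled on ROCm.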
AlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(
const HloModuleConfig& hlo_module_config,
const Compiler::TargetConfig& gpu_target_config,
AlgebraicSimplifierOptions opts_from_compiler) {
AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
opts_from_compiler;
layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(
GpuConvRewriter::ConvIsLowerable);
layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(
hlo_module_config.debug_options()
.xla_gpu_enable_dot_strength_reduction());
layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);
layout_insensitive_algsimp_opts.set_minmax_propagate_nan(
!hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());
layout_insensitive_algsimp_opts
.set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);
if (gpu_target_config.platform_name == "ROCM") {
layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);
}
layout_insensitive_algsimp_opts
.set_enable_unconditional_reduce_of_concat_replacement(false);
return layout_insensitive_algsimp_opts;
}
absl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {
HloPassPipeline pre_spmd_pipeline("pre-spmd-partitioner"); | #include "xla/service/gpu/gpu_compiler.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/xla_debug_info_manager.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::tsl::testing::StatusIs;
class GpuCompilerTest : public HloTestBase {
public:
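// Schedules `module` for the default executor's device and runs the
// compiler's post-scheduling pipelines on it.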
absl::Status Schedule(HloModule* module) {
auto compiler = backend().compiler();
const se::DeviceDescription& gpu_device_info =
backend().default_stream_executor()->GetDeviceDescription();
TF_RETURN_IF_ERROR(ScheduleGpuModule(module, 4, gpu_device_info).status());
return tensorflow::down_cast<GpuCompiler*>(compiler)
->RunPostSchedulingPipelines(module, 4 * 1024 * 1024, gpu_device_info);
}
};
TEST_F(GpuCompilerTest, CompiledProgramsCount) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ResetCompiledProgramsCountForTesting();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
EXPECT_EQ(GetCompiledProgramsCount(), 1);
}
TEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
false})
.value();
EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(
executable->module().unique_id()));
}
TEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
p = f32[10]{0} parameter(0)
ROOT neg = f32[10]{0} negate(p)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
int module_id = module->unique_id();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
true})
.value();
EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));
}
TEST_F(GpuCompilerTest, CopyInsertionFusion) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
std::unique_ptr<HloModule> compiled_module =
backend()
.compiler()
->RunHloPasses(module->Clone(), backend().default_stream_executor(),
nullptr)
.value();
VLOG(2) << compiled_module->ToString();
size_t total_fusion_instrs = 0;
for (const HloInstruction* instr :
compiled_module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kFusion) {
++total_fusion_instrs;
}
}
EXPECT_EQ(total_fusion_instrs, 1);
const HloInstruction* entry_root =
compiled_module->entry_computation()->root_instruction();
EXPECT_THAT(
entry_root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()))));
}
TEST_F(GpuCompilerTest, CanRunScheduledModules) {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_disable_all_hlo_passes(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m, is_scheduled=true
w {
p = s8[] parameter(0)
ROOT n = s8[] negate(p)
}
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] fusion(p), kind=kLoop, calls=w
})",
config));
EXPECT_TRUE(Run(std::move(module), true));
}
class PersistedAutotuningTest : public HloTestBase {
protected:
static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = f16[1,16,17,3] parameter(0)
p1 = s8[16,17,3] parameter(1)
cp1 = f16[16,17,3] convert(p1)
ROOT _ = f16[1,16,16] dot(p0, cp1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
std::string GetUniqueTempFilePath(absl::string_view suffix) {
std::string filename = TempDir();
CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
std::string(suffix)));
return filename;
}
std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
std::string str;
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
EXPECT_THAT(str, Not(IsEmpty()));
return str;
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions options = HloTestBase::GetDebugOptionsForTest();
options.set_xla_gpu_dump_autotune_results_to(
xla_gpu_dump_autotune_results_to_);
options.set_xla_gpu_load_autotune_results_from(
xla_gpu_load_autotune_results_from_);
return options;
}
std::string xla_gpu_dump_autotune_results_to_;
std::string xla_gpu_load_autotune_results_from_;
};
TEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) {
constexpr absl::string_view kInvalidTextProto = "Invalid!";
xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(".txt");
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
tsl::Env* env = tsl::Env::Default();
TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_,
kInvalidTextProto));
TF_EXPECT_OK(GetOptimizedModule(kHloText).status());
{
std::string autotune_results_str =
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);
AutotuneResults results;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
&results));
}
}
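// Counts kCopy instructions, per computation and across a whole module.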
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
TEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) {
const absl::string_view hlo_string = R"(
HloModule all_gather_overlapping
condition {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[2,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_0)
all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}
all-gather-done = f32[2,128] all-gather-done(all-gather-start)
ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[2,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(hlo_string));
EXPECT_EQ(CountCopies(*module), 5);
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK(Schedule(module.get()));
EXPECT_EQ(CountCopies(*module), 4);
root = module->entry_computation()->root_instruction();
while_op = root->operand(0)->operand(0);
EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),
HloOpcode::kAllGatherDone);
}
TEST_F(GpuCompilerTest,
GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) {
GTEST_SKIP() << "TODO(b/344573710): this test is flaky, disable it "
<< "until flakiness is fixed.";
auto cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (!cc.IsAtLeastAmpere()) {
GTEST_SKIP() << "Autotuning results have only been generated for Ampere "
<< "and Hopper GPUs";
}
const absl::string_view hlo_string = R"(
HloModule test
ENTRY main {
param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0)
param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1)
param_2 = s32[] parameter(2)
constant_0 = s32[] constant(0)
dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024}
reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0)
broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2}
add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0)
transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2}
slice_0 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]}
reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0)
copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1)
constant_1 = bf16[] constant(0.08838)
broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={}
multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1)
slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]}
reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1)
copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2)
ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3}
}
)";
HloModuleConfig config;
DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest();
triton_enabled_debug_options.set_xla_gpu_enable_address_computation_fusion(
false);
triton_enabled_debug_options
.set_xla_gpu_require_complete_aot_autotune_results(true);
config.set_debug_options(triton_enabled_debug_options);
config.set_replica_count(1);
config.set_num_partitions(1);
std::string path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"gpu_compiler_test_autotune_db.textproto");
TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_enabled_module,
GetOptimizedModule(std::move(module)));
AutotunerUtil::ClearAutotuneResults();
DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest();
triton_disabled_debug_options.set_xla_gpu_enable_address_computation_fusion(
false);
triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(triton_disabled_debug_options);
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnVerifiedModule(hlo_string, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_disabled_module,
GetOptimizedModule(std::move(module)));
const HloInstruction* root =
triton_enabled_module->entry_computation()->root_instruction();
const HloInstruction* custom_op = root->operand(0)->operand(0);
EXPECT_TRUE(custom_op->IsCustomCall("__cublas$gemm"));
EXPECT_EQ(triton_enabled_module->computation_count(),
triton_disabled_module->computation_count());
}
TEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) {
const char* kModuleStr = R"(
HloModule cp
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
recv-data = f32[1, 1024, 1024] collective-permute(send-data),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
const char* kExpected = R"(
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID:[0-9]+]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: %[[CUSTOM_CALL:.*]] = custom-call
CHECK: %[[AFTER_ALL:.*]] = after-all
CHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]}
CHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[RESULT_RECV]]}
CHECK: ROOT
CHECK-SAME: %[[RESULT_RECV]]
CHECK: ENTRY
CHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all
CHECK: %[[ENTRY_RECV:.*]] = recv(%[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"}
CHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]])
CHECK-SAME: channel_id=1
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0",
CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,4}}"},
CHECK-SAME: control-predecessors={%[[ENTRY_RECV]]}
CHECK: %[[WHILE_INIT:.*]] = tuple
CHECK-SAME: %[[ENTRY_SEND]]
CHECK: while(%[[WHILE_INIT]])
CHECK: recv-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
CHECK: send-done
CHECK-SAME: channel_id=[[CHANNEL_ID]]
CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"}
)";
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);
debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1);
debug_options.set_xla_gpu_enable_pipelined_p2p(true);
debug_options.set_xla_gpu_enable_triton_gemm(false);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
TF_ASSERT_OK(Schedule(optimized_module.get()));
HloPrintOptions options;
options.set_print_operand_shape(false);
options.set_print_result_shape(false);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matched,
RunFileCheck(optimized_module->ToString(options), kExpected));
EXPECT_TRUE(filecheck_matched);
}
class KernelCacheTest : public HloTestBase {
public:
void SetUp() override {
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_));
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules,
dynamic_cast<GpuCompiler*>(backend().compiler())
->CanUseLinkModules(config));
if (!can_use_link_modules) {
GTEST_SKIP() << "Caching compiled kernels requires support of linking.";
}
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_);
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true);
return debug_options;
}
bool CacheFileExists() {
if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) {
return false;
}
return true;
}
int CacheEntryCount() {
if (!CacheFileExists()) {
return 0;
}
std::string serialized;
TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_,
&serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
return proto.entries_size();
}
std::string cache_file_name_;
static constexpr absl::string_view kHloText = R"(
ENTRY e {
p = s8[] parameter(0)
c = s8[] constant(8)
ROOT _ = s8[] add(p, c)
})";
};
TEST_F(KernelCacheTest, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
TEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(R"(
ENTRY e {
a = f32[5,5] parameter(0)
ROOT _ = f32[5,5] custom-call(a, a), custom_call_target="__cublas$gemm",
backend_config="{ \"gemm_backend_config\": {\"alpha_real\":1,\"beta\":0,\"dot_dimension_numbers\":{\"lhs_contracting_dimensions\":[\"1\"],\"rhs_contracting_dimensions\":[\"0\"],\"lhs_batch_dimensions\":[],\"rhs_batch_dimensions\":[]},\"alpha_imag\":0,\"precision_config\":{\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]},\"epilogue\":\"DEFAULT\"}}"
})",
false));
EXPECT_FALSE(CacheFileExists());
}
TEST_F(KernelCacheTest, CacheGrowsWithNewKernels) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(R"(
ENTRY e {
p = s8[] parameter(0)
ROOT _ = s8[] multiply(p, p)
})",
false));
EXPECT_EQ(CacheEntryCount(), 2);
}
class KernelCacheTestSingleThreaded : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_force_compilation_parallelism(1);
return debug_options;
}
};
TEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) {
EXPECT_FALSE(CacheFileExists());
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
EXPECT_TRUE(Run(kHloText, false));
EXPECT_EQ(CacheEntryCount(), 1);
}
class NoKernelCacheTest : public KernelCacheTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);
return debug_options;
}
};
TEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) {
EXPECT_TRUE(Run(kHloText, false));
EXPECT_FALSE(CacheFileExists());
}
}
}
} | 2,079 |
#ifndef XLA_SERVICE_GPU_HLO_FUSION_STATS_H_
#define XLA_SERVICE_GPU_HLO_FUSION_STATS_H_
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
namespace xla {
namespace gpu {
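// Histogram keyed by the set of unique opcodes inside a fusion computation,
// mapping each opcode set to the number of fusions that contain exactly it.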
class HloOpcodeHistogram : public std::map<std::set<std::string>, int64_t> {
public:
std::string ToString();
};
class HloFusionStatsVisitor : public ConstDfsHloVisitorWithDefault {
public:
absl::Status RunOnModule(HloModule* module);
std::string ToString();
protected:
absl::Status DefaultAction(const xla::HloInstruction* instr) final;
absl::Status HandleFusion(const HloInstruction* fusion) override;
private:
int64_t num_fusions_ = 0;
int64_t num_loop_fusions_ = 0;
int64_t num_input_fusions_ = 0;
HloOpcodeHistogram loop_fusion_opcode_histogram_;
HloOpcodeHistogram input_fusion_opcode_histogram_;
};
}
}
#endif
#include "xla/service/gpu/hlo_fusion_stats.h"
#include <set>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
namespace {
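// DFS visitor that collects the unique opcodes of a computation, ignoring
// constants and parameters and folding all element-wise ops into "cwise".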
class OpcodeCollector : public ConstDfsHloVisitorWithDefault {
public:
std::set<std::string> GetUniqueOpcodes() { return opcodes_; }
protected:
absl::Status DefaultAction(const xla::HloInstruction* instr) final {
switch (instr->opcode()) {
case HloOpcode::kConstant:
break;
case HloOpcode::kParameter:
break;
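// Element-wise unary and binary ops are aggregated under a single "cwise"
// bucket to keep the histogram compact.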
case HloOpcode::kAbs:
case HloOpcode::kCbrt:
case HloOpcode::kCeil:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kNegate:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
opcodes_.insert("cwise");
break;
default:
opcodes_.insert(std::string(HloOpcodeString(instr->opcode())));
}
return absl::OkStatus();
}
private:
std::set<std::string> opcodes_;
};
std::set<std::string> GetUniqueOpcodes(HloComputation* computation) {
OpcodeCollector collector;
if (!computation->Accept(&collector).ok()) {
return {};
}
return collector.GetUniqueOpcodes();
}
}
std::string HloOpcodeHistogram::ToString() {
std::string result;
for (const auto& entry : *this) {
absl::StrAppend(&result, "{", absl::StrJoin(entry.first, ", "),
"}: ", entry.second, "\n");
}
return result;
}
absl::Status HloFusionStatsVisitor::RunOnModule(HloModule* module) {
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(this));
return absl::OkStatus();
}
std::string HloFusionStatsVisitor::ToString() {
return absl::StrCat("HLO Fusion Stats:\n",
"Number of fusion ops: ", num_fusions_, "\n",
"Number of kLoop fusions: ", num_loop_fusions_, "\n",
loop_fusion_opcode_histogram_.ToString(), "\n",
"Number of kInput fusions: ", num_input_fusions_, "\n",
input_fusion_opcode_histogram_.ToString());
}
absl::Status HloFusionStatsVisitor::DefaultAction(
const xla::HloInstruction* instr) {
return absl::OkStatus();
}
absl::Status HloFusionStatsVisitor::HandleFusion(const HloInstruction* fusion) {
num_fusions_++;
std::set<std::string> opcodes =
GetUniqueOpcodes(fusion->fused_instructions_computation());
if (fusion->fusion_kind() == HloInstruction::FusionKind::kLoop) {
num_loop_fusions_++;
loop_fusion_opcode_histogram_[opcodes]++;
} else if (fusion->fusion_kind() == HloInstruction::FusionKind::kInput) {
num_input_fusions_++;
input_fusion_opcode_histogram_[opcodes]++;
}
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/hlo_fusion_stats.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
using HloFusionStatsTest = HloTestBase;
TEST_F(HloFusionStatsTest, LoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
another_fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
select_2 = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=another_fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32,32]{2,1,0}, f32[32,32,32]{2,1,0})
tuple(gte1, gte1, select, select_2)
})")
.value();
HloFusionStatsVisitor fusion_stats_visitor;
TF_ASSERT_OK(
module.get()->entry_computation()->Accept(&fusion_stats_visitor));
SCOPED_TRACE(module->ToString());
std::string stats = fusion_stats_visitor.ToString();
ASSERT_TRUE(absl::StrContains(stats, "Number of fusion ops: 3"));
ASSERT_TRUE(absl::StrContains(stats, "Number of kLoop fusions: 2"));
ASSERT_TRUE(absl::StrContains(stats, "{broadcast, compare, select}: 2"));
ASSERT_TRUE(absl::StrContains(stats, "Number of kInput fusions: 1"));
ASSERT_TRUE(absl::StrContains(stats, "{cwise, reduce, tuple}: 1"));
}
TEST_F(HloFusionStatsTest, AggregateCwiseOps) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
ROOT exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(mul)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
ROOT fusion = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation
})")
.value();
HloFusionStatsVisitor fusion_stats_visitor;
TF_ASSERT_OK(
module.get()->entry_computation()->Accept(&fusion_stats_visitor));
SCOPED_TRACE(module->ToString());
std::string stats = fusion_stats_visitor.ToString();
ASSERT_TRUE(absl::StrContains(stats, "{cwise}: 1")) << stats;
}
}
}
} | 2,080 |
#ifndef XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#define XLA_SERVICE_GPU_HLO_ALGORITHM_DENYLIST_H_
#include <string>
#include <vector>
#include "xla/autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
ComputeCapability cc, CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo);
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
GpuBackendConfig config);
}
}
#endif
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_autotuning.pb.h"
#include "xla/stream_executor/dnn.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
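// Denylist entries compiled into the binary; additional entries can be
// supplied at runtime via --xla_gpu_algorithm_denylist_path.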
constexpr char kDefaultDenylist[] = R"pb(
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 0
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1,
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
entries {
hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
backend_config {
operation_queue_id: 0
wait_on_operation_queues: []
cudnn_conv_backend_config: {
activation_mode: kNone
conv_result_scale: 1
side_input_scale: 1
leakyrelu_alpha: 0
},
force_earliest_schedule: false
}
cc { major: 7 minor: 5 }
cudnn_version { major: 9 minor: 1 patch: 1 }
algos { id: 14 }
}
)pb";
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
ComputeCapability cc, CudnnVersion cudnn_version,
const std::string& blas_version, const std::string& hlo) {
using MapType = absl::flat_hash_map<
std::tuple<std::string, int, int, int, int, int, std::string>,
std::vector<stream_executor::dnn::AlgorithmDesc>>;
static MapType* denylist = [] {
auto* list = new MapType();
AlgorithmDenylist proto;
auto process_denylist = [list](const AlgorithmDenylist& proto) {
for (const auto& entry : proto.entries()) {
for (const auto& algo : entry.algos()) {
(*list)[std::make_tuple(HloStringWithGpuBackendConfig(
entry.hlo(), entry.backend_config()),
entry.cc().major(), entry.cc().minor(),
entry.cudnn_version().major(),
entry.cudnn_version().minor(),
entry.cudnn_version().patch(),
entry.blas_version())]
.emplace_back(algo.id(), algo.tensor_ops(), std::nullopt);
}
}
};
std::string file_path =
GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();
if (!file_path.empty()) {
TF_CHECK_OK(tsl::ReadTextProto(tsl::Env::Default(), file_path, &proto));
process_denylist(proto);
}
CHECK(tsl::protobuf::TextFormat::ParseFromString(
std::string(kDefaultDenylist), &proto));
process_denylist(proto);
return list;
}();
std::vector<stream_executor::dnn::AlgorithmDesc> algorithms;
auto add_matching_disabled_algorithms_to_result = [&](const auto& key) {
auto iter = denylist->find(key);
if (iter != denylist->end()) {
algorithms.insert(algorithms.end(), iter->second.begin(),
iter->second.end());
}
};
auto key = std::make_tuple(hlo, cc.major(), cc.minor(), cudnn_version.major(),
cudnn_version.minor(), cudnn_version.patch(),
blas_version);
add_matching_disabled_algorithms_to_result(key);
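// Entries without a BLAS version act as wildcards, so retry the lookup with
// an empty BLAS version.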
std::get<6>(key) = std::string{};
add_matching_disabled_algorithms_to_result(key);
return algorithms;
}
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
GpuBackendConfig config) {
BackendConfigWrapper backend_config(config);
return absl::StrCat(hlo, ", backend_config=", backend_config.GetRawString());
}
}
} | #include "xla/service/gpu/hlo_algorithm_denylist.h"
#include <cstdlib>
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class DenylistTest : public testing::Test {
protected:
DenylistTest() {
std::string existing_xla_flags;
const char* env = std::getenv("XLA_FLAGS");
if (env != nullptr) {
existing_xla_flags = absl::StrCat(env, " ");
}
tsl::setenv(
"XLA_FLAGS",
absl::StrCat(
existing_xla_flags, "--xla_gpu_algorithm_denylist_path=",
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
"data", "hlo_algorithm_denylist.pbtxt"))
.data(),
1);
config_ =
ParseTextProto<GpuBackendConfig>(
"operation_queue_id: 0 wait_on_operation_queues: [] "
"cudnn_conv_backend_config: { activation_mode: kNone "
"conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0} "
"force_earliest_schedule: false")
.value();
}
GpuBackendConfig config_;
};
TEST_F(DenylistTest, DefaultTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{0, true},
stream_executor::dnn::AlgorithmDesc{0, false},
stream_executor::dnn::AlgorithmDesc{1, true},
stream_executor::dnn::AlgorithmDesc{1, false},
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, NegativeTest) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list =
GetDisabledConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
EXPECT_THAT(list, testing::IsEmpty());
}
TEST_F(DenylistTest, NoBlasVersionSet) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(7);
cudnn_version.set_minor(6);
cudnn_version.set_patch(2);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "120301",
HloStringWithGpuBackendConfig(
R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
config_));
EXPECT_THAT(list, testing::UnorderedElementsAre(
stream_executor::dnn::AlgorithmDesc{42, true},
stream_executor::dnn::AlgorithmDesc{42, false}));
}
TEST_F(DenylistTest, EntryFromHardcodedList) {
ComputeCapability cc;
cc.set_major(7);
cc.set_minor(0);
CudnnVersion cudnn_version;
cudnn_version.set_major(9);
cudnn_version.set_minor(0);
cudnn_version.set_patch(0);
auto list = GetDisabledConvAlgorithms(
cc, cudnn_version, "9000",
HloStringWithGpuBackendConfig(
R"((f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target="__cudnn$convBiasActivationForward")",
config_));
EXPECT_THAT(list, testing::ElementsAre(
stream_executor::dnn::AlgorithmDesc{14, false}));
}
}
}
} | 2,081 |
#ifndef XLA_SERVICE_GPU_REDUCTION_UTILS_H_
#define XLA_SERVICE_GPU_REDUCTION_UTILS_H_
#include <cstdint>
#include <ostream>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config);
inline constexpr int64_t BatchedReductionRaceFreeBound() { return 8; }
struct ReductionDimensions {
constexpr static int kRowMajorReducedDimension = 0;
constexpr static int kRowKeptDimension = 1;
constexpr static int kRowMinorReducedDimension = 2;
constexpr static int kColMajorKeptDimension = 0;
constexpr static int kColReducedDimension = 1;
constexpr static int kColMinorKeptDimension = 2;
bool is_row_reduction;
Vector3 dimensions;
bool operator==(const ReductionDimensions& other) const {
return is_row_reduction == other.is_row_reduction &&
dimensions == other.dimensions;
}
};
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions);
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions);
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce);
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce);
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions);
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions);
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions);
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero);
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce);
}
}
#endif
#include "xla/service/gpu/reduction_utils.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <ostream>
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#ifdef GOOGLE_CUDA
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#endif
namespace xla {
namespace gpu {
namespace {
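// Partitions `shape`, minor-to-major, into three contiguous segments
// {major, middle, minor}, where the middle segment is exactly `dims_middle`;
// returns the element count of each segment.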
Vector3 PartitionShapeByMiddleDimensions(
const Shape& shape, absl::Span<const int64_t> dims_middle) {
CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle));
Vector3 values = {1, 1, 1};
enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 };
Segment cur_segment = kMinor;
for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) {
if (cur_segment != kMajor) {
bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim);
if (cur_segment == kMinor) {
if (cur_dim_in_middle) {
cur_segment = kMiddle;
}
} else if (cur_segment == kMiddle) {
if (!cur_dim_in_middle) {
cur_segment = kMajor;
}
}
}
values[cur_segment] *= shape.dimensions(cur_dim);
}
return values;
}
}
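// Minimum thread count along X for row reductions: 1024 when ptxas is at
// least 12.2 (or on non-CUDA builds), otherwise 512.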
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) {
#ifdef GOOGLE_CUDA
auto ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
auto ptxas_version_tuple =
se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir);
if (!ptxas_version_tuple.ok() ||
ptxas_version_tuple.value() < std::array<int64_t, 3>{12, 2, 0}) {
return 512;
}
#endif
return 1024;
}
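// Tile sizes {z, y, x}: row reductions tile the batch dimension up to the
// race-free bound; column reductions use a fixed 128-element tile along the
// reduced dimension.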
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
int64_t tile_z = std::min(reduction_dimensions.dimensions[0],
BatchedReductionRaceFreeBound());
return {tile_z, 1, 16};
}
return {1, 128, 1};
}
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions);
if (reduction_dimensions.is_row_reduction) {
return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2];
}
return WarpSize() * reduction_tiling[1];
}
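// Heuristic: the dedicated reduction emitter only pays off with enough
// parallelism; small column reductions prefer the elemental emitter.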
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return (reduction_dimensions.dimensions[2] >= WarpSize()) ||
((WarpSize() % reduction_dimensions.dimensions[2]) == 0);
}
int64_t major_size = reduction_dimensions.dimensions[1];
int64_t minor_size = reduction_dimensions.dimensions[2];
bool prefer_elemental_emitter =
(major_size < WarpSize()) ||
(major_size < 2 * WarpSize() && minor_size < WarpSize()) ||
(major_size < 4 * WarpSize() && minor_size < 8) ||
(major_size < 8 * WarpSize() && minor_size < 3);
return !prefer_elemental_emitter;
}
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) {
if (reduce.opcode() != HloOpcode::kReduce) {
return false;
}
const Shape& operand_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_keep) ||
LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_reduce)) &&
IsUnnestedReductionFasterThanElemental(
GetReductionKindAndContiguousComponents(reduce));
}
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return reduction_dimensions.dimensions[2] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions) &&
reduction_dimensions.dimensions[0] <=
BatchedReductionRaceFreeBound();
}
return reduction_dimensions.dimensions[1] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions);
}
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions) {
bool is_row_reduction = reduction_dimensions.is_row_reduction;
os << (is_row_reduction ? "row " : "column ") << "reduction ["
<< absl::StrJoin(reduction_dimensions.dimensions, ",") << "] -> ["
<< reduction_dimensions.dimensions[0] << ", "
<< reduction_dimensions
.dimensions[is_row_reduction
? ReductionDimensions::kRowKeptDimension
: ReductionDimensions::kColMinorKeptDimension]
<< "]";
return os;
}
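// Canonicalizes a reduce into (is_row_reduction, {major, middle, minor}) by
// partitioning the operand shape into contiguous kept and reduced components.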
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce) {
Shape input_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < input_shape.rank(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
if (dims_to_keep.empty()) {
return {true,
{1, 1, ShapeUtil::ElementsIn(input_shape)}};
}
if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(),
dims_to_keep)) {
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_keep);
if (shape_partition[1] == 1) {
return {true,
{1, 1, shape_partition[0] * shape_partition[2]}};
}
if (shape_partition[2] == 1) {
return {false,
{1, shape_partition[0], shape_partition[1]}};
}
return {true, shape_partition};
}
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce);
if (shape_partition[2] == 1) {
return {true,
{1, shape_partition[0], shape_partition[1]}};
}
return {false, shape_partition};
}
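// A hero reduction is "real" if it is emitted with the reduction emitter
// and, when it is not itself the fusion root, is race free.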
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero) {
if (!IsReductionFromOrToContiguousDimensions(hero)) {
return false;
}
return &root == &hero ||
ReductionIsRaceFree(hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(hero));
}
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce) {
return GetReductionKindAndContiguousComponents(*reduce_hero) ==
GetReductionKindAndContiguousComponents(*first_reduce);
}
}
} | #include "xla/service/gpu/reduction_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ReductionUtilsTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
bitcast = f32[32,8,8]{2,1,0} bitcast(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,32]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,32]{1,0} parameter(0)
neg = f32[32,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,32]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
}
}
} | 2,082 |
#ifndef XLA_SERVICE_GPU_GPU_P2P_PIPELINER_H_
#define XLA_SERVICE_GPU_GPU_P2P_PIPELINER_H_
#include "xla/service/hlo_pass_pipeline.h"
namespace xla {
namespace gpu {
void AddP2PPipeliner(HloPassPipeline& pipeline);
}
}
#endif
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/collective_pipeliner.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
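// Returns true for SendDone/RecvDone ops that carry the pipeline frontend
// attribute, have not already been pipelined, and whose control dependencies
// permit moving them.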
bool ShouldPipeline(const HloInstruction* instr) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return false;
}
auto it = instr->frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it == instr->frontend_attributes().map().end()) {
return false;
}
auto allowed_predecessor = [&]() {
return instr->opcode() == HloOpcode::kRecvDone &&
instr->control_predecessors().size() == 1 &&
instr->control_predecessors()[0]->opcode() == HloOpcode::kSend;
};
if (!instr->control_successors().empty() ||
(!instr->control_predecessors().empty() && !allowed_predecessor())) {
return false;
}
bool is_pipelined =
(instr->user_count() == 1 && instr->parent() != nullptr &&
instr->users()[0] == instr->parent()->root_instruction());
return !is_pipelined;
}
bool ShouldAllowLoopVariantParameterInChain(const HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->opcode() == HloOpcode::kParameter);
return true;
}
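// Rewrites the send/recv validation frontend attribute of the Send/Recv
// feeding a pipelined SendDone/RecvDone, using `transformer` to remap the
// per-pair iteration bounds.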
absl::Status PostprocessP2PImpl(
HloInstruction* instr,
std::function<std::string(std::vector<ReplicaGroup>&)> transformer) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) {
return Internal("Expected SendDone/RecvDone as the pipelined collective");
}
instr = instr->mutable_operand(0);
if (!HloPredicateIsOp<HloOpcode::kRecv, HloOpcode::kSend>(instr)) {
return Internal("Expected Send/Recv as the SendDone/RecvDone operand");
}
auto validation_it =
instr->frontend_attributes().map().find(kSendRecvValidationAttr);
if (validation_it == instr->frontend_attributes().map().end() ||
validation_it->second == "invalid") {
return absl::OkStatus();
}
auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);
if (!statusor_bounds.ok()) {
return statusor_bounds.status();
}
std::string validation_attr = transformer(statusor_bounds.value());
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kSendRecvValidationAttr] = validation_attr;
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
}
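// The peeled Send/Recv instance executes only iteration 0, so a bounds pair
// stays valid iff its interval contains 0.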
absl::Status PostprocessPeeledP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= 0 && upper_bound >= 0) {
all_invalid = false;
bounds.push_back({0, 0});
} else {
bounds.push_back({1, 0});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
}
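// After rotation the Send/Recv in iteration i corresponds to original
// iteration i+1, so both validation bounds are decremented (clamped at 0);
// pairs that become empty are marked invalid.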
absl::Status PostprocessRotatedP2P(HloInstruction* instr) {
auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) {
std::vector<std::pair<int64_t, int64_t>> bounds;
bounds.reserve(replica_groups.size());
bool all_invalid = true;
for (const auto& replica_group : replica_groups) {
int64_t lower_bound = replica_group.replica_ids(0);
int64_t upper_bound = replica_group.replica_ids(1);
if (lower_bound <= upper_bound) {
if (lower_bound >= 1) {
--lower_bound;
}
if (upper_bound >= 1) {
--upper_bound;
}
if (lower_bound <= upper_bound) {
all_invalid = false;
bounds.push_back({lower_bound, upper_bound});
} else {
bounds.push_back({1, 0});
}
} else {
bounds.push_back({lower_bound, upper_bound});
}
}
std::string validation_attr;
if (all_invalid) {
validation_attr = "invalid";
} else {
validation_attr = "{" +
absl::StrJoin(bounds, ",",
absl::PairFormatter(
[](std::string* out, int64_t value) {
absl::StrAppend(out, "{", value);
},
",",
[](std::string* out, int64_t value) {
absl::StrAppend(out, value, "}");
})) +
"}";
}
return validation_attr;
};
return PostprocessP2PImpl(instr, transform_bounds);
}
}
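// Configures and registers a CollectivePipeliner that pipelines Send/Recv
// chains backward across while-loop iterations.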
void AddP2PPipeliner(HloPassPipeline& pipeline) {
  CollectivePipeliner::Config config{
      /*level_to_operate_on=*/0,
      /*max_pipelining_per_loop=*/INT64_MAX,
      /*last_run=*/true,
      /*pipeline_use_tree=*/false,
      /*process_different_sized_ops=*/true,
      /*pipelining_direction=*/
      CollectivePipeliner::PipeliningDirection::kBackward,
      /*should_process=*/ShouldPipeline,
      /*acceptable_formatting=*/HloPredicateTrue,
      /*reuse_pipelined_op_buffer=*/HloPredicateTrue,
      /*should_allow_loop_variant_parameter_in_chain=*/
      ShouldAllowLoopVariantParameterInChain,
      /*should_allow_control_dependencies=*/true,
      /*postprocess_backward_peeled_op=*/PostprocessPeeledP2P,
      /*postprocess_backward_rotated_op=*/PostprocessRotatedP2P};
pipeline.AddPass<CollectivePipeliner>(config);
}
}
} | #include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
class GpuP2PPipelinerTest : public HloTestBase {
public:
GpuP2PPipelinerTest() {
const int64_t kNumReplicas = 1;
const int64_t kNumComputations = 4;
    config_ = GetModuleConfigForTest(/*replica_count=*/kNumReplicas,
                                     /*num_partitions=*/kNumComputations);
}
absl::StatusOr<bool> RunOptimizer(HloModule* module) {
HloPassPipeline pipeline("optimizer");
    pipeline.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                                  /*allow_mixed_precision=*/false);
AddP2PPipeliner(pipeline);
    pipeline.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                                  /*allow_mixed_precision=*/false);
return pipeline.Run(module);
}
protected:
HloModuleConfig config_;
};
TEST_F(GpuP2PPipelinerTest,
TransformRecvSendBackwardsWithMetaDataPostProcessing) {
const char* kHloStr = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
after-all.0.s = token[] after-all()
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0",
_xla_send_recv_validation="{{1,7}}"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}, control-predecessors={send.0}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
EXPECT_TRUE(RunOptimizer(module.get()).value());
XLA_VLOG_LINES(10, module->ToString());
auto while_op = FindInstruction(module.get(), "while");
EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
const char* kPeeledAttr = "_xla_send_recv_validation=\"invalid\"";
const char* kRotatedAttr = "_xla_send_recv_validation=\"{{0,6}}\"";
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));
}
}
}
} | 2,083 |
#ifndef XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#define XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
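// Indicates whether `compute_capability` supports an optimized integer
// implementation of the given convolution, vectorized to `vector_size`
// (4 or 32) elements.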
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size);
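// Describes a reshape-transpose-reshape sequence equivalent to the cuDNN
// filter/bias reordering: reshape to `transpose_shape`, transpose by
// `permutation`, then reshape to `result_shape`.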
struct CudnnReorderTransposeConfig {
Shape transpose_shape;
Shape result_shape;
std::vector<int64_t> permutation;
};
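// Infers the reshape-transpose-reshape configuration that implements the
// cuDNN filter reordering for int8x32 convolutions on a filter of the given
// shape and dimension numbers.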
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers);
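// Infers the reshape-transpose-reshape configuration that implements the
// cuDNN bias reordering for int8x32 convolutions on a rank-1 bias of the
// given shape.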
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape);
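// Custom-call target used to mark workspace allocations.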
inline constexpr absl::string_view kWorkspaceAllocationCustomCallTarget =
"__nop";
bool IsWorkspaceAllocationRoot(const HloInstruction& root);
}
}
#endif
#include "xla/service/gpu/cudnn_support_utils.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));
const Shape& input_shape = conv.operand(0)->shape();
const Shape& kernel_shape = conv.operand(1)->shape();
const Shape& result_shape = conv.shape().tuple_shapes(0);
const auto& dnums = conv.convolution_dimension_numbers();
if (vector_size != 4 && vector_size != 32) {
VLOG(3) << "Unsupported vector size for integer convolution: "
<< vector_size;
return false;
}
if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||
!compute_capability.IsAtLeast(6, 1)) {
VLOG(3) << "Compute capability " << compute_capability.ToString()
<< " is not sufficent for int8x" << vector_size
<< " vectorization.";
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
VLOG(3) << "Convolution kind is not forward or foward-activation: "
<< conv.ToString();
return false;
}
if (!primitive_util::IsIntegralType(input_shape.element_type()) ||
!primitive_util::IsIntegralType(kernel_shape.element_type())) {
VLOG(3) << "Convolution does not accept integer inputs/weights: "
<< conv.ToString();
return false;
}
if (dnums.input_spatial_dimensions().size() != 2 ||
dnums.kernel_spatial_dimensions().size() != 2 ||
dnums.output_spatial_dimensions().size() != 2) {
VLOG(3) << "Convolution is not 2D: " << conv.ToString();
return false;
}
if (vector_size == 32 &&
!primitive_util::IsIntegralType(result_shape.element_type())) {
VLOG(3) << "int8x32 convolutions only support integer output: "
<< conv.ToString();
return false;
}
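  // The vectorized int8x32 convolution additionally requires each input
  // spatial dimension to be at least as large as the dilated filter extent,
  // i.e. input_dim > (kernel_dim - 1) * dilation.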
if (vector_size == 32) {
int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);
int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);
int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);
int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);
const int64_t dilationW = conv.window().dimensions()[0].base_dilation();
const int64_t dilationH = conv.window().dimensions()[1].base_dilation();
if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {
VLOG(3) << "Conv spatial filter/input dimensions are too small for "
"vecotrized int8x32 convolution: "
<< conv.ToString();
return false;
}
}
if (window_util::HasDilation(conv.window())) {
VLOG(3) << "Vectorized integer convolutions do not support dilation: "
<< conv.ToString();
return false;
}
return true;
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {
if (shape.rank() != 4 && shape.rank() != 5) {
return Internal("Filter shape has unexpected rank.");
}
const int64_t dO = dimension_numbers.kernel_output_feature_dimension();
const int64_t dI = dimension_numbers.kernel_input_feature_dimension();
const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);
const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);
bool revectorize = shape.rank() == 5;
const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;
const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;
if (shape.dimensions(dO) % 32 != 0 ||
shape.dimensions(dI) % (32 / vsize) != 0 ||
(revectorize && vsize != 4 && vsize != 32)) {
return Internal("Filter shape is not vectorizable.");
}
std::vector<int64_t> output = {
shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),
shape.dimensions(dH), shape.dimensions(dW), 32};
Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);
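  // Maps each logical filter dimension to its index in the rank-8 split
  // shape, which splits O into (O/8, 4, 2) and the input-feature/vector
  // dimensions into (I/(32/vsize), 8, 4).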
auto calc_index = [&](int dim) {
bool split_v = vsize == 32;
return (revectorize
? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)
: (dI < dim ? 3 : 0)) +
(dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);
};
int idx_O = calc_index(dO);
int idx_I = calc_index(dI);
int idx_H = calc_index(dH);
int idx_W = calc_index(dW);
int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;
int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2;
std::vector<int64_t> dims(8);
dims[idx_O] = shape.dimensions(dO) / 8;
dims[idx_O + 1] = 4;
dims[idx_O + 2] = 2;
dims[idx_I] = shape.dimensions(dI) / (32 / vsize);
dims[idx_Y] = 8;
dims[idx_Z] = 4;
dims[idx_H] = shape.dimensions(dH);
dims[idx_W] = shape.dimensions(dW);
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O,
idx_O + 2, idx_Y, idx_O + 1, idx_Z};
return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape) {
if (shape.rank() != 1) {
return Internal("Bias shape has unexpected rank.");
}
if (shape.dimensions(0) % 32 != 0) {
return Internal("Bias shape is not vectorizable.");
}
std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4};
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {0, 2, 1, 3};
return CudnnReorderTransposeConfig{split_shape, shape, permutation};
}
bool IsWorkspaceAllocationRoot(const HloInstruction& root) {
return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&
root.operand_count() == 2 &&
root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&
root.operand(1)->operand_count() == 0;
}
}
} | #include "xla/service/gpu/cudnn_support_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class CudnnSupportUtilsTest : public HloTestBase {
public:
absl::StatusOr<HloCustomCallInstruction*> GetCustomCall(
xla::VerifiedHloModule* module, absl::string_view target) {
HloCustomCallInstruction* call = nullptr;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->IsCustomCall(target)) {
VLOG(1) << inst->ToString();
if (call != nullptr) {
return tsl::errors::FailedPrecondition(
"Found more than one custom call.");
}
call = Cast<HloCustomCallInstruction>(inst);
}
}
}
if (call == nullptr) {
return tsl::errors::FailedPrecondition(
"Did not find any matching custom call.");
}
return call;
}
};
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckKind) {
auto moduleFwd = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleBwdInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {
auto moduleS8InOut = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f32[32,10,10,64] parameter(0)
filter = f32[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,10,64] parameter(0)
filter = s8[2,2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b012f_012io->b012f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),
window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {
auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,2,2,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,3,3,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(moduleFilterAlmostCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {};
TEST_P(ReorderFilterRank4Test, InferTransposeRank4) {
auto input_dims = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[4] = {0, 0, 0, 0};
shape_dims[dI] = 224;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);
}
std::vector<std::string> GeneratePermutations(std::string input_dims) {
std::sort(input_dims.begin(), input_dims.end());
std::vector<std::string> permutations;
do {
permutations.push_back(input_dims);
} while (std::next_permutation(input_dims.begin(), input_dims.end()));
return permutations;
}
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test,
::testing::ValuesIn(GeneratePermutations("01io")));
class ReorderFilterRank5Test
: public ::testing::TestWithParam<std::tuple<std::string, int>> {};
TEST_P(ReorderFilterRank5Test, InferTransposeRank5) {
auto [input_dims, vsize] = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};
shape_dims[dI] = 224 / vsize;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
}
INSTANTIATE_TEST_SUITE_P(
ReorderTestSuite, ReorderFilterRank5Test,
::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")),
::testing::Values(4, 32)));
class ReorderBiasTest : public ::testing::Test {};
TEST_F(ReorderBiasTest, InferTranspose) {
Shape shape = ShapeUtil::MakeShape(U8, {96});
auto bias = HloInstruction::CreateParameter(2, shape, "bias");
Shape unused = ShapeUtil::MakeNil();
auto input = HloInstruction::CreateParameter(0, unused, "input");
auto filter = HloInstruction::CreateParameter(1, unused, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForBiasReordering(shape));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));
EXPECT_EQ(inferred_config.permutation[2], 1);
EXPECT_EQ(inferred_config.permutation[3], 3);
}
}
}
} | 2,084 |
#ifndef XLA_SERVICE_GPU_TARGET_UTIL_H_
#define XLA_SERVICE_GPU_TARGET_UTIL_H_
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
enum class TargetIntrinsicID {
kThreadIdx = 0,
kThreadIdy,
kThreadIdz,
kBlockIdx,
kBlockIdy,
kBlockIdz,
kBarrierId,
kBlockDimx,
kBlockDimy,
kBlockDimz,
kGroupBarrierId,
};
enum class TargetDeviceFunctionID {
kAtan2 = 0,
kCbrt,
kCos,
kExp,
kExpm1,
kFmod,
kHypot,
kLog,
kLog1p,
kPow,
kRsqrt,
kSin,
kSqrt,
kTan,
kTanh,
kErf,
};
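// Returns the device function id corresponding to the given HLO opcode, or a
// NotFound error if the opcode has no device-function equivalent.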
absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode);
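// Emits a call to the device function named `callee_name`, declaring it in
// the current module if needed, converting the given primitive input/output
// types to the matching LLVM IR types.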
llvm::CallInst* EmitDeviceFunctionCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
    absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b,
absl::string_view name = "");
llvm::CallInst* EmitCallToTargetIntrinsic(
TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b);
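// Annotates `func` with the target-specific metadata or calling convention
// that marks it as a GPU kernel entry point.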
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
llvm::IRBuilder<>* b);
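// Returns the name of the device function for `func_id`, mangled according
// to `output_type` and the target triple.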
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
PrimitiveType output_type,
llvm::Triple target_triple);
}
}
#endif
#include "xla/service/gpu/target_util.h"
#include <functional>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using absl::StrCat;
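// Carries, for each supported target (NVPTX, AMDGPU, SPIR), either an LLVM
// intrinsic id or a function that emits an equivalent device-function call
// where no intrinsic exists.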
struct TargetIntrinsics {
llvm::Intrinsic::ID nvptx_intrinsic;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
amdgpu_intrinsic_or_function;
std::variant<llvm::Intrinsic::ID,
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>
spir_intrinsic_or_function;
};
struct TargetIntrinsics GetIntrinsic(TargetIntrinsicID intrin) {
switch (intrin) {
case TargetIntrinsicID::kThreadIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x,
llvm::Intrinsic::amdgcn_workitem_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y,
llvm::Intrinsic::amdgcn_workitem_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kThreadIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z,
llvm::Intrinsic::amdgcn_workitem_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdx: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x,
llvm::Intrinsic::amdgcn_workgroup_id_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdy: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y,
llvm::Intrinsic::amdgcn_workgroup_id_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBlockIdz: {
return {
llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z,
llvm::Intrinsic::amdgcn_workgroup_id_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
};
}
case TargetIntrinsicID::kBarrierId: {
return {llvm::Intrinsic::nvvm_barrier0, llvm::Intrinsic::amdgcn_s_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
case TargetIntrinsicID::kBlockDimx: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(0)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(0)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimy: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_y,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(1)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(1)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kBlockDimz: {
return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_z,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall("__ockl_get_local_size",
{b_->getInt32(2)}, {U32}, U64,
{b_->getContext()}, b_);
},
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(2)},
{U32}, U64, {b_->getContext()}, b_);
}};
}
case TargetIntrinsicID::kGroupBarrierId: {
return {llvm::Intrinsic::nvvm_bar_warp_sync,
llvm::Intrinsic::amdgcn_wave_barrier,
[](llvm::IRBuilder<>* b_) -> llvm::CallInst* {
return EmitDeviceFunctionCall(
"_Z22__spirv_ControlBarrierjjj",
{b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)},
{U32, U32, U32}, U32,
llvm::AttrBuilder(b_->getContext())
.addAttribute(llvm::Attribute::Convergent),
b_);
}};
}
}
}
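// Carries the per-target root names of a device function; the type- and
// target-specific suffix is appended in ObtainDeviceFunctionName.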
struct TargetDeviceFunction {
const std::string nvptx_root;
const std::string amdgpu_root;
const std::string spir_root;
};
struct TargetDeviceFunction GetDeviceFunctionRoot(
TargetDeviceFunctionID func_id) {
switch (func_id) {
case TargetDeviceFunctionID::kAtan2: {
return {"__nv_atan2", "__ocml_atan2", "_Z17__spirv_ocl_atan2"};
}
case TargetDeviceFunctionID::kCos: {
return {"__nv_cos", "__ocml_cos", "_Z15__spirv_ocl_cos"};
}
case TargetDeviceFunctionID::kErf: {
return {"__nv_erf", "__ocml_erf", "_Z15__spirv_ocl_erf"};
}
case TargetDeviceFunctionID::kExp: {
return {"__nv_exp", "__ocml_exp", "_Z15__spirv_ocl_exp"};
}
case TargetDeviceFunctionID::kExpm1: {
return {"__nv_expm1", "__ocml_expm1", "_Z17__spirv_ocl_expm1"};
}
case TargetDeviceFunctionID::kFmod: {
return {"__nv_fmod", "__ocml_fmod", "_Z16__spirv_ocl_fmod"};
}
case TargetDeviceFunctionID::kHypot: {
return {"__nv_hypot", "__ocml_hypot", "_Z17__spirv_ocl_hypot"};
}
case TargetDeviceFunctionID::kLog: {
return {"__nv_log", "__ocml_log", "_Z15__spirv_ocl_log"};
}
case TargetDeviceFunctionID::kLog1p: {
return {"__nv_log1p", "__ocml_log1p", "_Z17__spirv_ocl_log1p"};
}
case TargetDeviceFunctionID::kPow: {
return {"__nv_pow", "__ocml_pow", "_Z15__spirv_ocl_pow"};
}
case TargetDeviceFunctionID::kRsqrt: {
return {"__nv_rsqrt", "__ocml_rsqrt", "_Z17__spirv_ocl_rsqrt"};
}
case TargetDeviceFunctionID::kSin: {
return {"__nv_sin", "__ocml_sin", "_Z15__spirv_ocl_sin"};
}
case TargetDeviceFunctionID::kSqrt: {
return {"__nv_sqrt", "__ocml_sqrt", "_Z16__spirv_ocl_sqrt"};
}
case TargetDeviceFunctionID::kTan: {
return {"__nv_tan", "__ocml_tan", "_Z15__spirv_ocl_tan"};
}
case TargetDeviceFunctionID::kTanh: {
return {"__nv_tanh", "__ocml_tanh", "_Z16__spirv_ocl_tanh"};
}
case TargetDeviceFunctionID::kCbrt: {
return {"__nv_cbrt", "__ocml_cbrt", "_Z16__spirv_ocl_cbrt"};
}
}
}
}
absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode op) {
switch (op) {
case HloOpcode::kAtan2:
return TargetDeviceFunctionID::kAtan2;
case HloOpcode::kCos:
return TargetDeviceFunctionID::kCos;
case HloOpcode::kExp:
return TargetDeviceFunctionID::kExp;
case HloOpcode::kErf:
return TargetDeviceFunctionID::kErf;
case HloOpcode::kExpm1:
return TargetDeviceFunctionID::kExpm1;
case HloOpcode::kLog:
return TargetDeviceFunctionID::kLog;
case HloOpcode::kLog1p:
return TargetDeviceFunctionID::kLog1p;
case HloOpcode::kPower:
return TargetDeviceFunctionID::kPow;
case HloOpcode::kRemainder:
return TargetDeviceFunctionID::kFmod;
case HloOpcode::kRsqrt:
return TargetDeviceFunctionID::kRsqrt;
case HloOpcode::kSin:
return TargetDeviceFunctionID::kSin;
case HloOpcode::kSqrt:
return TargetDeviceFunctionID::kSqrt;
case HloOpcode::kTan:
return TargetDeviceFunctionID::kTan;
case HloOpcode::kTanh:
return TargetDeviceFunctionID::kTanh;
case HloOpcode::kCbrt:
return TargetDeviceFunctionID::kCbrt;
default:
break;
}
return NotFound("The HLO opcode %s is not mapped to a device function",
HloOpcodeString(op));
}
std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id,
PrimitiveType output_type,
llvm::Triple target_triple) {
struct TargetDeviceFunction gpu_root_names = GetDeviceFunctionRoot(func_id);
if (target_triple.isNVPTX()) {
if (output_type == F32) {
return StrCat(gpu_root_names.nvptx_root, "f");
} else if (output_type == F64) {
return gpu_root_names.nvptx_root;
} else {
LOG(FATAL) << "Unexpected type while getting device function name: "
<< primitive_util::LowercasePrimitiveTypeName(output_type);
}
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
if (output_type == F32) {
return StrCat(gpu_root_names.amdgpu_root, "_f32");
} else if (output_type == F64) {
return StrCat(gpu_root_names.amdgpu_root, "_f64");
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else if (target_triple.isSPIR()) {
if (output_type == F32) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "ff");
} else {
return StrCat(gpu_root_names.spir_root, "f");
}
} else if (output_type == F64) {
if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" ||
gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" ||
gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" ||
gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") {
return StrCat(gpu_root_names.spir_root, "dd");
} else {
return StrCat(gpu_root_names.spir_root, "d");
}
} else {
LOG(FATAL) << "Unexpected type while getting device function name.";
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
llvm::CallInst* EmitDeviceFunctionCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b,
absl::string_view name) {
std::vector<llvm::Type*> ir_input_types;
llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
for (PrimitiveType input_type : input_types) {
ir_input_types.push_back(
llvm_ir::PrimitiveTypeToIrType(input_type, module));
}
llvm::FunctionType* callee_type = llvm::FunctionType::get(
llvm_ir::PrimitiveTypeToIrType(output_type, module),
ir_input_types,
false);
llvm::Function* callee = llvm::dyn_cast<llvm::Function>(
b->GetInsertBlock()
->getModule()
->getOrInsertFunction(callee_name, callee_type)
.getCallee());
callee->addFnAttrs(attributes);
if (target_triple.isSPIR())
callee->setCallingConv(llvm::CallingConv::SPIR_FUNC);
return b->CreateCall(callee, llvm_ir::AsArrayRef(operands), name.data());
}
llvm::CallInst* EmitCallToTargetIntrinsic(
TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b) {
llvm::Module* module = b->GetInsertBlock()->getModule();
struct TargetIntrinsics gpu_intrinsic_id = GetIntrinsic(intrinsic_id);
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
llvm::Intrinsic::ID llvm_intrinsic_id = llvm::Intrinsic::not_intrinsic;
if (target_triple.isNVPTX()) {
llvm_intrinsic_id = gpu_intrinsic_id.nvptx_intrinsic;
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.amdgpu_intrinsic_or_function);
return (*builder_func)(b);
}
} else if (target_triple.isSPIR()) {
llvm::Intrinsic::ID* llvm_intrinsic_id_ptr =
std::get_if<llvm::Intrinsic::ID>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
if (llvm_intrinsic_id_ptr) {
llvm_intrinsic_id = *llvm_intrinsic_id_ptr;
} else {
std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func =
std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>(
&gpu_intrinsic_id.spir_intrinsic_or_function);
return (*builder_func)(b);
}
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, llvm_intrinsic_id, llvm_ir::AsArrayRef(overloaded_types));
return b->CreateCall(intrinsic, llvm_ir::AsArrayRef(operands));
}
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
llvm::IRBuilder<>* b) {
llvm::Triple target_triple = llvm::Triple(module->getTargetTriple());
if (target_triple.isNVPTX()) {
llvm::LLVMContext& context = module->getContext();
llvm::NamedMDNode* nvvm_annotations_node =
module->getOrInsertNamedMetadata("nvvm.annotations");
nvvm_annotations_node->addOperand(llvm::MDNode::get(
context, {llvm::ConstantAsMetadata::get(func),
llvm::MDString::get(context, "kernel"),
llvm::ConstantAsMetadata::get(b->getInt32(1))}));
} else if (target_triple.getArch() == llvm::Triple::amdgcn) {
func->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
func->addFnAttr("amdgpu-flat-work-group-size", "1, 1024");
} else if (target_triple.isSPIR()) {
func->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
} else {
LOG(FATAL) << "Invalid triple " << target_triple.str();
}
}
}
} | #include "xla/service/gpu/target_util.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class TargetUtilTest : public testing::Test {
public:
TargetUtilTest() : module_("test", ctx_), builder_(ctx_) {}
protected:
void SetUp() override {
auto fn = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::getVoidTy(ctx_), {}),
llvm::Function::LinkageTypes::ExternalLinkage, "fn", module_);
auto block = llvm::BasicBlock::Create(ctx_, "blk", fn);
builder_.SetInsertPoint(block);
}
llvm::LLVMContext ctx_;
llvm::Module module_;
llvm::IRBuilder<> builder_;
};
TEST_F(TargetUtilTest, NVPTXGroupBarrier) {
module_.setTargetTriple("nvptx");
  EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId,
                            /*operands=*/{builder_.getInt32(-1)},
                            /*overloaded_types=*/{}, &builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
TEST_F(TargetUtilTest, AMDGCNGroupBarrier) {
module_.setTargetTriple("amdgcn");
  EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId, /*operands=*/{},
                            /*overloaded_types=*/{}, &builder_);
builder_.CreateRetVoid();
EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs()));
}
}
}
} | 2,085 |
#ifndef XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_VALID_ITERATION_ANNOTATOR_H_
#define XLA_SERVICE_GPU_COLLECTIVE_PERMUTE_VALID_ITERATION_ANNOTATOR_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
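// Annotates collective-permute instructions that form a forward or backward
// cycle inside a pipelined while loop (one carrying the
// "is_pipelined_while_loop" frontend attribute) with the
// _xla_send_recv_validation attribute, which records, per source-target
// pair, the window of loop iterations in which the transfer is valid.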
class CollectivePermuteValidIterationAnnotator : public HloModulePass {
public:
CollectivePermuteValidIterationAnnotator() = default;
absl::string_view name() const override {
return "collective-permute-valid-iteration-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/collective_permute_valid_iteration_annotator.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
namespace xla {
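// Returns the unique non-constant operand of `instr`; CHECK-fails if there
// is more than one distinct non-constant operand or none at all.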
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
}
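// Returns the constant step by which the loop induction variable of
// `while_inst` is incremented each iteration, or std::nullopt if it cannot
// be determined.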
std::optional<int64_t> GetStep(HloInstruction* while_inst) {
std::optional<int64_t> indvar_tuple_idx =
GetLoopInductionVarTupleIdx(while_inst);
if (!indvar_tuple_idx) {
return std::nullopt;
  }
auto* while_body_indvar_update =
while_inst->while_body()->root_instruction()->mutable_operand(
*indvar_tuple_idx);
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
HloInstruction* trip_count_increase_step_instr = nullptr;
if (!Match(while_body_indvar_update,
match::AddAnyOrder(match::Op().Is(while_body_indvar),
match::Op(&trip_count_increase_step_instr)))) {
return std::nullopt;
}
return LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal());
}
absl::StatusOr<bool> CollectivePermuteValidIterationAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() != HloOpcode::kCollectivePermute) {
continue;
}
if (inst->frontend_attributes().map().find(kSendRecvValidationAttr) !=
inst->frontend_attributes().map().end()) {
continue;
}
auto sourceTargetPairs = inst->source_target_pairs();
if (!IsForwardCycle(sourceTargetPairs) &&
!IsBackwardCycle(sourceTargetPairs)) {
continue;
}
VLOG(2) << "Collective permute with cycle: " << inst->ToString();
int64_t max_device_num = -1;
for (auto [source, target] : sourceTargetPairs) {
max_device_num = std::max(std::max(source, target), max_device_num);
}
int64_t num_devices = max_device_num + 1;
HloInstruction* whileOp = inst->parent()->WhileCallInstruction();
if (whileOp == nullptr) {
VLOG(2) << "No surrounding while op found. Ignoring " << inst->name();
continue;
}
if (!whileOp->frontend_attributes().map().contains(
"is_pipelined_while_loop"))
continue;
TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config,
whileOp->backend_config<WhileLoopBackendConfig>());
if (!config.has_known_trip_count()) {
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): unknown";
continue;
}
int64_t trip_count = config.known_trip_count().n();
std::optional<int64_t> step = GetStep(whileOp);
VLOG(2) << "Trip count for while loop (" << whileOp->name()
<< "): " << trip_count;
if (!step) {
VLOG(2) << "Could not find step for while operation";
continue;
}
VLOG(2) << "Step for while loop (" << whileOp->name() << "): " << *step;
if (*step != 1) {
VLOG(2) << "Step is not 1. Skipping...";
continue;
}
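      // Pair i in the cycle first carries valid data at iteration i and stays
      // valid for `trip_count - num_devices` further iterations, giving the
      // window [i, i + offset].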
int64_t offset = trip_count - num_devices;
std::vector<std::pair<int64_t, int64_t>> sendRecvValidation(
sourceTargetPairs.size());
for (size_t currIdx = 0; currIdx < sourceTargetPairs.size(); currIdx++) {
sendRecvValidation[currIdx] = {currIdx, currIdx + offset};
}
if (IsBackwardCycle(sourceTargetPairs)) {
std::reverse(sendRecvValidation.begin(), sendRecvValidation.end());
}
xla::FrontendAttributes attributes;
std::string iteration_instances =
"{" +
absl::StrJoin(sendRecvValidation, ",",
[](std::string* out, std::pair<int64_t, int64_t> item) {
absl::StrAppend(out, "{", item.first, ",",
item.second, "}");
}) +
"}";
(*attributes.mutable_map())[kSendRecvValidationAttr] =
iteration_instances;
inst->add_frontend_attributes(attributes);
VLOG(1) << "Adding " << kSendRecvValidationAttr << " to " << inst->name()
<< ": " << iteration_instances;
changed = true;
}
}
return changed;
}
} | #include "xla/service/gpu/collective_permute_valid_iteration_annotator.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using CollectivePermuteValidIterationAnnotatorTest = HloTestBase;
TEST_F(CollectivePermuteValidIterationAnnotatorTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %permute)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_FALSE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_EQ(sendRecvValidationIt, cp->frontend_attributes().map().end());
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, ForwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{0,6},{1,7},{2,8},{3,9}}");
}
TEST_F(CollectivePermuteValidIterationAnnotatorTest, BackwardCycle) {
absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->(s32[], s32[])}
%Body (param: (s32[], s32[])) -> (s32[], s32[]) {
%param = (s32[], s32[]) parameter(0)
%i = s32[] get-tuple-element((s32[], s32[]) %param), index=1
%one = s32[] constant(1)
%i_plus_one = s32[] add(s32[] %i, s32[] %one)
%permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)
}
%Cond (param.1: (s32[], s32[])) -> pred[] {
%param.1 = (s32[], s32[]) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1
%trip_count = s32[] constant(10)
ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT
}
ENTRY %test () -> (s32[], s32[]) {
%i_start = s32[] constant(0)
%p_start = s32[] constant(0)
%initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)
ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, 1, 4));
HloPassPipeline pipeline("my-pass-pipeline");
pipeline.AddPass<WhileLoopTripCountAnnotator>();
pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
HloCollectivePermuteInstruction* cp =
DynCastOrNull<HloCollectivePermuteInstruction>(
FindInstruction(module.get(), HloOpcode::kCollectivePermute));
ASSERT_NE(cp, nullptr);
auto sendRecvValidationIt =
cp->frontend_attributes().map().find(kSendRecvValidationAttr);
ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end());
std::string sendRecvValidationAttr = sendRecvValidationIt->second;
EXPECT_EQ(sendRecvValidationAttr, "{{3,9},{2,8},{1,7},{0,6}}");
}
}
} | 2,086 |