Mayne lsu #216

Open
MayneMei wants to merge 29 commits into master from mayne_lsu

Changes from 3 commits
Commits (29)
c18dd7b
adding store buffer
MayneMei Nov 1, 2024
ab4466e
modified return type of some store function, and reorder the constructor
MayneMei Nov 1, 2024
589165c
change forwarding at handleCacheLookupReq_, roll back handleCacheRead_
MayneMei Nov 1, 2024
089b47f
passed regression test, more test cases needed
MayneMei Nov 18, 2024
7f06091
modified test case, also disable store send cache lookup req when retire
MayneMei Nov 25, 2024
fdba6c8
modified test to check forwarding cycle
MayneMei Nov 25, 2024
77fa028
Merge branch 'master' into mayne_lsu
MayneMei Nov 25, 2024
22bd6bf
Update Lsu_test.cpp
MayneMei Nov 29, 2024
24ff983
Update LSU.hpp, add parameter for data forwarding, set default value …
MayneMei Nov 29, 2024
402f57f
Update Lsu_test.cpp
MayneMei Nov 29, 2024
108e839
data forwarding parameterize, modified testcase
MayneMei Nov 29, 2024
1a4a3b7
typo fixing
MayneMei Nov 29, 2024
a1d5e04
typo fixing
MayneMei Nov 29, 2024
1e2dd26
test typo fixing
MayneMei Nov 29, 2024
da1a50a
add helper function for test
MayneMei Nov 29, 2024
b6d1a05
syntax error
MayneMei Nov 29, 2024
57158ba
modified test_case and little syntax in lsu
MayneMei Nov 29, 2024
b73dba2
change erase to pop_front(), don't have local machine right now so on…
MayneMei Dec 2, 2024
e578f6d
change store buffer to deque
MayneMei Dec 2, 2024
a196f00
added debug info
MayneMei Dec 2, 2024
ed13f0b
different debug info
MayneMei Dec 2, 2024
27ce54e
store_buffer_initialization
MayneMei Dec 2, 2024
2877c55
initialization sequence change
MayneMei Dec 2, 2024
5d4dc71
comment cout
MayneMei Dec 2, 2024
5711010
cout debugging
MayneMei Dec 2, 2024
c1e1920
roll back to using buffer instead of deque
MayneMei Dec 2, 2024
56fd442
modified test, cycle matched expectation
MayneMei Dec 6, 2024
972b169
added documentation
MayneMei Dec 13, 2024
6dbfd85
Merge branch 'master' into mayne_lsu
MayneMei Jan 24, 2025
172 changes: 131 additions & 41 deletions core/LSU.cpp
@@ -20,6 +20,8 @@ namespace olympia
replay_buffer_("replay_buffer", p->replay_buffer_size, getClock()),
replay_buffer_size_(p->replay_buffer_size),
replay_issue_delay_(p->replay_issue_delay),
store_buffer_("store_buffer", p->ldst_inst_queue_size, getClock()), // Add this line
store_buffer_size_(p->ldst_inst_queue_size),
ready_queue_(),
load_store_info_allocator_(sparta::notNull(OlympiaAllocators::getOlympiaAllocators(node))
->load_store_info_allocator),
@@ -31,7 +33,7 @@ namespace olympia
cache_read_stage_(cache_lookup_stage_
+ 1), // Get data from the cache in the cycle after cache lookup
complete_stage_(
cache_read_stage_
cache_read_stage_
Collaborator comment: Please set up your editor to remove extraneous (dead) whitespace from end of lines.
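As an aside on the reviewer's point, one common way to have trailing whitespace stripped automatically is an EditorConfig entry; this is only an illustrative sketch and assumes the repository does not already ship its own formatting configuration:

# .editorconfig sketch (hypothetical; only the two properties below are relevant here)
[*.{cpp,hpp}]
trim_trailing_whitespace = true
insert_final_newline = true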

+ p->cache_read_stage_length), // Complete stage is after the cache read stage
ldst_pipeline_("LoadStorePipeline", (complete_stage_ + 1),
getClock()), // complete_stage_ + 1 is number of stages
@@ -48,6 +50,7 @@ namespace olympia
ldst_pipeline_.enableCollection(node);
ldst_inst_queue_.enableCollection(node);
replay_buffer_.enableCollection(node);
store_buffer_.enableCollection(node);

// Startup handler for sending initial credits
sparta::StartupEvent(node, CREATE_SPARTA_HANDLER(LSU, sendInitialCredits_));
@@ -177,6 +180,12 @@ namespace olympia
{
ILOG("New instruction added to the ldst queue " << inst_ptr);
allocateInstToIssueQueue_(inst_ptr);
// Allocate stores to the store buffer
if (inst_ptr->isStoreInst())
{
allocateInstToStoreBuffer_(inst_ptr);
}

handleOperandIssueCheck_(inst_ptr);
lsu_insts_dispatched_++;
}
@@ -265,7 +274,20 @@ namespace olympia
sparta_assert(inst_ptr->getStatus() == Inst::Status::RETIRED,
"Get ROB Ack, but the store inst hasn't retired yet!");

++stores_retired_;
if (inst_ptr->isStoreInst())
{
auto oldest_store = getOldestStore_();
sparta_assert(oldest_store && oldest_store->getInstPtr()->getUniqueID() == inst_ptr->getUniqueID(),
"Attempting to retire store out of order! Expected: "
<< (oldest_store ? oldest_store->getInstPtr()->getUniqueID() : 0)
<< " Got: " << inst_ptr->getUniqueID());

// Remove from store buffer and commit to cache
out_cache_lookup_req_.send(oldest_store->getMemoryAccessInfoPtr());
store_buffer_.erase(store_buffer_.begin());
++stores_retired_;
}


updateIssuePriorityAfterStoreInstRetire_(inst_ptr);
if (isReadyToIssueInsts_())
@@ -438,6 +460,31 @@ namespace olympia
const MemoryAccessInfoPtr & mem_access_info_ptr =
load_store_info_ptr->getMemoryAccessInfoPtr();
const bool phy_addr_is_ready = mem_access_info_ptr->getPhyAddrStatus();
const InstPtr & inst_ptr = mem_access_info_ptr->getInstPtr();

// first check physical address and bypass conditions
const bool is_already_hit =
(mem_access_info_ptr->getCacheState() == MemoryAccessInfo::CacheState::HIT);
const bool is_unretired_store =
inst_ptr->isStoreInst() && (inst_ptr->getStatus() != Inst::Status::RETIRED);
const bool cache_bypass = is_already_hit || !phy_addr_is_ready || is_unretired_store;

if (cache_bypass)
{
if (is_already_hit)
{
ILOG("Cache Lookup is skipped (Cache already hit)");
}
else if (is_unretired_store)
{
ILOG("Cache Lookup is skipped (store instruction not oldest)");
}
else
{
sparta_assert(false, "Cache access is bypassed without a valid reason!");
}
return;
}

// If we did not have an MMU hit from previous stage, invalidate and bail
if (false == phy_addr_is_ready)
@@ -462,66 +509,59 @@ namespace olympia
return;
}

const InstPtr & inst_ptr = mem_access_info_ptr->getInstPtr();
ILOG(load_store_info_ptr << " " << mem_access_info_ptr);

// If have passed translation and the instruction is a store,
// then it's good to be retired (i.e. mark it completed).
// Stores typically do not cause a flush after a successful
// translation. We now wait for the Retire block to "retire"
// it, meaning it's good to go to the cache
if (inst_ptr->isStoreInst() && (inst_ptr->getStatus() == Inst::Status::SCHEDULED))
if (inst_ptr->isStoreInst())
{
ILOG("Store marked as completed " << inst_ptr);
inst_ptr->setStatus(Inst::Status::COMPLETED);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
if (inst_ptr->getStatus() == Inst::Status::SCHEDULED)
{
updateInstReplayReady_(load_store_info_ptr);
ILOG("Store marked as completed " << inst_ptr);
inst_ptr->setStatus(Inst::Status::COMPLETED);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
{
updateInstReplayReady_(load_store_info_ptr);
}
return;
}
return;
}

// Loads dont perform a cache lookup if there are older stores present in the load store
// queue
if (!inst_ptr->isStoreInst() && olderStoresExists_(inst_ptr)
&& allow_speculative_load_exec_)
else // Loads handling
{
ILOG("Dropping speculative load " << inst_ptr);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
// Check for speculative execution constraints.
// Since we use data forwarding, we only need to check whether all older stores have been issued
if (allow_speculative_load_exec_ && !allOlderStoresIssued_(inst_ptr))
{
ILOG("Dropping speculative load " << inst_ptr << " due to unissued older stores");
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
updateInstReplayReady_(load_store_info_ptr);
return;
}
return;
}

const bool is_already_hit =
(mem_access_info_ptr->getCacheState() == MemoryAccessInfo::CacheState::HIT);
const bool is_unretired_store =
inst_ptr->isStoreInst() && (inst_ptr->getStatus() != Inst::Status::RETIRED);
const bool cache_bypass = is_already_hit || !phy_addr_is_ready || is_unretired_store;
// Check if we can forward from the store buffer first
uint64_t load_addr = inst_ptr->getTargetVAddr();
Collaborator comment: const uint64_t

auto forwarding_store = findYoungestMatchingStore_(load_addr);

if (cache_bypass)
{
if (is_already_hit)
if (forwarding_store)
{
ILOG("Cache Lookup is skipped (Cache already hit)");
}
else if (is_unretired_store)
{
ILOG("Cache Lookup is skipped (store instruction not oldest)");
ILOG("Found forwarding store for load " << inst_ptr);
mem_access_info_ptr->setDataReady(true);
mem_access_info_ptr->setCacheState(MemoryAccessInfo::CacheState::HIT);
return;
}
else
{
sparta_assert(false, "Cache access is bypassed without a valid reason!");

// No forwarding possible - need cache access
if (!mem_access_info_ptr->isCacheHit()) {
out_cache_lookup_req_.send(mem_access_info_ptr);
}
return;
}

out_cache_lookup_req_.send(mem_access_info_ptr);

}

void LSU::getAckFromCache_(const MemoryAccessInfoPtr & mem_access_info_ptr)
@@ -790,6 +830,7 @@ namespace olympia
flushIssueQueue_(criteria);
flushReplayBuffer_(criteria);
flushReadyQueue_(criteria);
flushStoreBuffer_(criteria);

// Cancel replay events
auto flush = [&criteria](const LoadStoreInstInfoPtr & ldst_info_ptr) -> bool
@@ -894,6 +935,40 @@ namespace olympia
ILOG("Append new load/store instruction to issue queue!");
}

void LSU::allocateInstToStoreBuffer_(const InstPtr & inst_ptr)
{
auto store_info_ptr = createLoadStoreInst_(inst_ptr);

sparta_assert(store_buffer_.size() < ldst_inst_queue_size_,
"Appending store buffer causes overflows!");

store_buffer_.push_back(store_info_ptr);
ILOG("Store added to store buffer: " << inst_ptr);
}

LoadStoreInstInfoPtr LSU::findYoungestMatchingStore_(uint64_t addr)
Collaborator comment: Method can be const

{
LoadStoreInstInfoPtr matching_store = nullptr;

for (auto it = store_buffer_.begin(); it != store_buffer_.end(); ++it)
{
auto & store = *it;
if (store->getInstPtr()->getTargetVAddr() == addr)
{
matching_store = store;
}
}
return matching_store;
Collaborator comment: Use std::find_if

}
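A minimal sketch of the std::find_if form the reviewer suggests, also marked const per the earlier comment. It assumes the store buffer exposes reverse iterators (rbegin/rend) the way a std::deque would; sparta::Buffer may not provide them, in which case a forward scan that keeps the last match (as in the current code) is the fallback. Requires <algorithm>.

// Sketch only: scan from youngest (back) to oldest (front) and return the
// first store whose target virtual address matches the load's address.
LoadStoreInstInfoPtr LSU::findYoungestMatchingStore_(const uint64_t addr) const
{
    const auto it = std::find_if(store_buffer_.rbegin(), store_buffer_.rend(),
                                 [addr](const LoadStoreInstInfoPtr & store)
                                 { return store->getInstPtr()->getTargetVAddr() == addr; });
    return (it != store_buffer_.rend()) ? *it : nullptr;
}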

LoadStoreInstInfoPtr LSU::getOldestStore_() const
{
if(store_buffer_.empty()) {
return nullptr;
}
return store_buffer_.read(0);
}

bool LSU::allOlderStoresIssued_(const InstPtr & inst_ptr)
{
for (const auto & ldst_info_ptr : ldst_inst_queue_)
@@ -1368,4 +1443,19 @@ namespace olympia
}
}

void LSU::flushStoreBuffer_(const FlushCriteria & criteria)
{
auto sb_iter = store_buffer_.begin();
while(sb_iter != store_buffer_.end()) {
auto inst_ptr = (*sb_iter)->getInstPtr();
if(criteria.includedInFlush(inst_ptr)) {
auto delete_iter = sb_iter++;
store_buffer_.erase(delete_iter);
Collaborator suggested change: replace
auto delete_iter = sb_iter++;
store_buffer_.erase(delete_iter);
with
sb_iter = store_buffer_.erase(sb_iter);

Author reply: Hi Knute, thank you for your suggestions. One question I have is whether I need to make "data_forwarding" a variable similar to "allow_speculative_load_exec_", so that the user can manually choose whether to use this feature?

ILOG("Flushed store from store buffer: " << inst_ptr);
} else {
++sb_iter;
}
}
}
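Regarding the author's question in the thread above, a minimal sketch of how such a knob could be declared, following the existing PARAMETER style in LSU.hpp; the parameter name, default value, and description here are assumptions for illustration, not the PR's final interface:

// Hypothetical addition to the LSU parameter set in LSU.hpp:
PARAMETER(bool, allow_data_forwarding, true,
          "Allow store-to-load data forwarding from the store buffer")

The constructor would then copy the parameter into a member (e.g. allow_data_forwarding_), and the forwarding lookup in handleCacheLookupReq_ would be guarded by that flag, mirroring how allow_speculative_load_exec_ is used.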

} // namespace olympia
16 changes: 16 additions & 0 deletions core/LSU.hpp
@@ -50,6 +50,7 @@ namespace olympia
PARAMETER(uint32_t, ldst_inst_queue_size, 8, "LSU ldst inst queue size")
PARAMETER(uint32_t, replay_buffer_size, ldst_inst_queue_size, "Replay buffer size")
PARAMETER(uint32_t, replay_issue_delay, 3, "Replay Issue delay")
// PARAMETER(uint32_t, store_buffer_size, ldst_inst_queue_size, "Size of the store buffer")
// LSU microarchitecture parameters
PARAMETER(
bool, allow_speculative_load_exec, true,
@@ -137,6 +138,10 @@ namespace olympia
const uint32_t replay_buffer_size_;
const uint32_t replay_issue_delay_;

// Store Buffer
sparta::Buffer<LoadStoreInstInfoPtr> store_buffer_;
const uint32_t store_buffer_size_;

sparta::PriorityQueue<LoadStoreInstInfoPtr> ready_queue_;
// MMU unit
bool mmu_busy_ = false;
@@ -258,6 +263,15 @@ namespace olympia

void allocateInstToIssueQueue_(const InstPtr & inst_ptr);

// allocate store inst to store buffer
void allocateInstToStoreBuffer_(const InstPtr & inst_ptr);

// Search store buffer in FIFO order for youngest matching store
LoadStoreInstInfoPtr findYoungestMatchingStore_(uint64_t addr);

// get oldest store
LoadStoreInstInfoPtr getOldestStore_() const;

bool olderStoresExists_(const InstPtr & inst_ptr);

bool allOlderStoresIssued_(const InstPtr & inst_ptr);
@@ -315,6 +329,8 @@ namespace olympia
// Flush Replay Buffer
void flushReplayBuffer_(const FlushCriteria &);

void flushStoreBuffer_(const FlushCriteria &);

// Counters
sparta::Counter lsu_insts_dispatched_{getStatisticSet(), "lsu_insts_dispatched",
"Number of LSU instructions dispatched",