Skip to content

Commit 32d7a69

Browse files
authored
[Reclaiming buffer] Common code update (sonic-net#1996)
- What I did Common code update for reclaiming buffer. 1. Loading zero_profiles when the dynamic buffer manager starts. The buffer manager won't consume it for now; this is to pass Azure CI. 2. Support removing a buffer pool. 3. Support exposing the maximum numbers of PGs and queues per port. 4. Support translating between a bitmap and a map string. 5. Change the log severity from ERROR to NOTICE when parsing a buffer profile from the buffer profile list fails. Typically this can be resolved by retrying; the severity of the similar logs when parsing buffer PGs and queues is already NOTICE. - Why I did it To split a large PR into smaller ones and help pass CI. - How I verified it vs tests and sonic-mgmt tests. Signed-off-by: Stephen Sun <[email protected]>
1 parent b91d8ba commit 32d7a69

10 files changed

+262
-41
lines changed

cfgmgr/buffermgrd.cpp

+24-13
Original file line numberDiff line numberDiff line change
@@ -38,12 +38,13 @@ mutex gDbMutex;
3838

3939
void usage()
4040
{
41-
cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json]>" << endl;
41+
cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json] [-z zero_profiles.json]>" << endl;
4242
cout << " -l pg_lookup.ini: PG profile look up table file (mandatory for static mode)" << endl;
4343
cout << " format: csv" << endl;
4444
cout << " values: 'speed, cable, size, xon, xoff, dynamic_threshold, xon_offset'" << endl;
4545
cout << " -a asic_table.json: ASIC-specific parameters definition (mandatory for dynamic mode)" << endl;
46-
cout << " -p peripheral_table.json: Peripheral (eg. gearbox) parameters definition (mandatory for dynamic mode)" << endl;
46+
cout << " -p peripheral_table.json: Peripheral (eg. gearbox) parameters definition (optional for dynamic mode)" << endl;
47+
cout << " -z zero_profiles.json: Zero profiles definition for reclaiming unused buffers (optional for dynamic mode)" << endl;
4748
}
4849

4950
void dump_db_item(KeyOpFieldsValuesTuple &db_item)
@@ -109,13 +110,13 @@ int main(int argc, char **argv)
109110
string pg_lookup_file = "";
110111
string asic_table_file = "";
111112
string peripherial_table_file = "";
112-
string json_file = "";
113+
string zero_profile_file = "";
113114
Logger::linkToDbNative("buffermgrd");
114115
SWSS_LOG_ENTER();
115116

116117
SWSS_LOG_NOTICE("--- Starting buffermgrd ---");
117118

118-
while ((opt = getopt(argc, argv, "l:a:p:h")) != -1 )
119+
while ((opt = getopt(argc, argv, "l:a:p:z:h")) != -1 )
119120
{
120121
switch (opt)
121122
{
@@ -131,6 +132,9 @@ int main(int argc, char **argv)
131132
case 'p':
132133
peripherial_table_file = optarg;
133134
break;
135+
case 'z':
136+
zero_profile_file = optarg;
137+
break;
134138
default: /* '?' */
135139
usage();
136140
return EXIT_FAILURE;
@@ -141,7 +145,9 @@ int main(int argc, char **argv)
141145
{
142146
std::vector<Orch *> cfgOrchList;
143147
bool dynamicMode = false;
144-
shared_ptr<vector<KeyOpFieldsValuesTuple>> db_items_ptr;
148+
shared_ptr<vector<KeyOpFieldsValuesTuple>> asic_table_ptr = nullptr;
149+
shared_ptr<vector<KeyOpFieldsValuesTuple>> peripherial_table_ptr = nullptr;
150+
shared_ptr<vector<KeyOpFieldsValuesTuple>> zero_profiles_ptr = nullptr;
145151

146152
DBConnector cfgDb("CONFIG_DB", 0);
147153
DBConnector stateDb("STATE_DB", 0);
@@ -150,18 +156,23 @@ int main(int argc, char **argv)
150156
if (!asic_table_file.empty())
151157
{
152158
// Load the json file containing the SWITCH_TABLE
153-
db_items_ptr = load_json(asic_table_file);
154-
if (nullptr != db_items_ptr)
159+
asic_table_ptr = load_json(asic_table_file);
160+
if (nullptr != asic_table_ptr)
155161
{
156-
write_to_state_db(db_items_ptr);
157-
db_items_ptr.reset();
162+
write_to_state_db(asic_table_ptr);
158163

159164
if (!peripherial_table_file.empty())
160165
{
161166
//Load the json file containing the PERIPHERIAL_TABLE
162-
db_items_ptr = load_json(peripherial_table_file);
163-
if (nullptr != db_items_ptr)
164-
write_to_state_db(db_items_ptr);
167+
peripherial_table_ptr = load_json(peripherial_table_file);
168+
if (nullptr != peripherial_table_ptr)
169+
write_to_state_db(peripherial_table_ptr);
170+
}
171+
172+
if (!zero_profile_file.empty())
173+
{
174+
//Load the json file containing the zero profiles
175+
zero_profiles_ptr = load_json(zero_profile_file);
165176
}
166177

167178
dynamicMode = true;
@@ -183,7 +194,7 @@ int main(int argc, char **argv)
183194
TableConnector(&stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE),
184195
TableConnector(&stateDb, STATE_PORT_TABLE_NAME)
185196
};
186-
cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, buffer_table_connectors, db_items_ptr));
197+
cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr));
187198
}
188199
else if (!pg_lookup_file.empty())
189200
{

cfgmgr/buffermgrdyn.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
using namespace std;
2828
using namespace swss;
2929

30-
BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector<TableConnector> &tables, shared_ptr<vector<KeyOpFieldsValuesTuple>> gearboxInfo = nullptr) :
30+
BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector<TableConnector> &tables, shared_ptr<vector<KeyOpFieldsValuesTuple>> gearboxInfo, shared_ptr<vector<KeyOpFieldsValuesTuple>> zeroProfilesInfo) :
3131
Orch(tables),
3232
m_platform(),
3333
m_applDb(applDb),

cfgmgr/buffermgrdyn.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ typedef std::map<std::string, std::string> gearbox_delay_t;
128128
class BufferMgrDynamic : public Orch
129129
{
130130
public:
131-
BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector<TableConnector> &tables, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> gearboxInfo);
131+
BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector<TableConnector> &tables, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> gearboxInfo, std::shared_ptr<std::vector<KeyOpFieldsValuesTuple>> zeroProfilesInfo);
132132
using Orch::doTask;
133133

134134
private:

orchagent/bufferorch.cpp

+48-18
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,15 @@ bool BufferOrch::isPortReady(const std::string& port_name) const
210210
return result;
211211
}
212212

213+
void BufferOrch::clearBufferPoolWatermarkCounterIdList(const sai_object_id_t object_id)
214+
{
215+
if (m_isBufferPoolWatermarkCounterIdListGenerated)
216+
{
217+
string key = BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP ":" + sai_serialize_object_id(object_id);
218+
m_flexCounterTable->del(key);
219+
}
220+
}
221+
213222
void BufferOrch::generateBufferPoolWatermarkCounterIdList(void)
214223
{
215224
// This function will be called in FlexCounterOrch when field:value tuple "FLEX_COUNTER_STATUS":"enable"
@@ -460,6 +469,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple)
460469

461470
if (SAI_NULL_OBJECT_ID != sai_object)
462471
{
472+
clearBufferPoolWatermarkCounterIdList(sai_object);
463473
sai_status = sai_buffer_api->remove_buffer_pool(sai_object);
464474
if (SAI_STATUS_SUCCESS != sai_status)
465475
{
@@ -699,6 +709,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
699709
string op = kfvOp(tuple);
700710
vector<string> tokens;
701711
sai_uint32_t range_low, range_high;
712+
bool need_update_sai = true;
702713

703714
SWSS_LOG_DEBUG("Processing:%s", key.c_str());
704715
tokens = tokenize(key, delimiter);
@@ -736,6 +747,12 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
736747
}
737748
else if (op == DEL_COMMAND)
738749
{
750+
auto &typemap = (*m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]);
751+
if (typemap.find(key) == typemap.end())
752+
{
753+
SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str());
754+
need_update_sai = false;
755+
}
739756
sai_buffer_profile = SAI_NULL_OBJECT_ID;
740757
SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str());
741758
removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key);
@@ -760,7 +777,6 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
760777
}
761778
for (size_t ind = range_low; ind <= range_high; ind++)
762779
{
763-
sai_object_id_t queue_id;
764780
SWSS_LOG_DEBUG("processing queue:%zd", ind);
765781
if (port.m_queue_ids.size() <= ind)
766782
{
@@ -772,16 +788,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
772788
SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str());
773789
return task_process_status::task_need_retry;
774790
}
775-
queue_id = port.m_queue_ids[ind];
776-
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id);
777-
sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr);
778-
if (sai_status != SAI_STATUS_SUCCESS)
791+
if (need_update_sai)
779792
{
780-
SWSS_LOG_ERROR("Failed to set queue's buffer profile attribute, status:%d", sai_status);
781-
task_process_status handle_status = handleSaiSetStatus(SAI_API_QUEUE, sai_status);
782-
if (handle_status != task_process_status::task_success)
793+
sai_object_id_t queue_id;
794+
queue_id = port.m_queue_ids[ind];
795+
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id);
796+
sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr);
797+
if (sai_status != SAI_STATUS_SUCCESS)
783798
{
784-
return handle_status;
799+
SWSS_LOG_ERROR("Failed to set queue's buffer profile attribute, status:%d", sai_status);
800+
task_process_status handle_status = handleSaiSetStatus(SAI_API_QUEUE, sai_status);
801+
if (handle_status != task_process_status::task_success)
802+
{
803+
return handle_status;
804+
}
785805
}
786806
}
787807
}
@@ -823,6 +843,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
823843
string op = kfvOp(tuple);
824844
vector<string> tokens;
825845
sai_uint32_t range_low, range_high;
846+
bool need_update_sai = true;
826847

827848
SWSS_LOG_DEBUG("processing:%s", key.c_str());
828849
tokens = tokenize(key, delimiter);
@@ -861,6 +882,12 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
861882
}
862883
else if (op == DEL_COMMAND)
863884
{
885+
auto &typemap = (*m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]);
886+
if (typemap.find(key) == typemap.end())
887+
{
888+
SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str());
889+
need_update_sai = false;
890+
}
864891
sai_buffer_profile = SAI_NULL_OBJECT_ID;
865892
SWSS_LOG_NOTICE("Remove buffer PG %s", key.c_str());
866893
removeObject(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key);
@@ -886,7 +913,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
886913
}
887914
for (size_t ind = range_low; ind <= range_high; ind++)
888915
{
889-
sai_object_id_t pg_id;
890916
SWSS_LOG_DEBUG("processing pg:%zd", ind);
891917
if (port.m_priority_group_ids.size() <= ind)
892918
{
@@ -901,16 +927,20 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
901927
}
902928
else
903929
{
904-
pg_id = port.m_priority_group_ids[ind];
905-
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id);
906-
sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr);
907-
if (sai_status != SAI_STATUS_SUCCESS)
930+
if (need_update_sai)
908931
{
909-
SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status);
910-
task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status);
911-
if (handle_status != task_process_status::task_success)
932+
sai_object_id_t pg_id;
933+
pg_id = port.m_priority_group_ids[ind];
934+
SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id);
935+
sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr);
936+
if (sai_status != SAI_STATUS_SUCCESS)
912937
{
913-
return handle_status;
938+
SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status);
939+
task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status);
940+
if (handle_status != task_process_status::task_success)
941+
{
942+
return handle_status;
943+
}
914944
}
915945
}
916946
}

orchagent/bufferorch.h

+1
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ class BufferOrch : public Orch
4545

4646
void doTask() override;
4747
virtual void doTask(Consumer& consumer);
48+
void clearBufferPoolWatermarkCounterIdList(const sai_object_id_t object_id);
4849
void initTableHandlers();
4950
void initBufferReadyLists(DBConnector *confDb, DBConnector *applDb);
5051
void initBufferReadyList(Table& table, bool isConfigDb);

0 commit comments

Comments
 (0)