diff --git a/doc/_static/js/pgcalc.js b/doc/_static/js/pgcalc.js new file mode 100644 index 00000000000..e13c30895fc --- /dev/null +++ b/doc/_static/js/pgcalc.js @@ -0,0 +1,357 @@ +var _____WB$wombat$assign$function_____ = function(name) {return (self._wb_wombat && self._wb_wombat.local_init && self._wb_wombat.local_init(name)) || self[name]; }; +if (!self.__WB_pmw) { self.__WB_pmw = function(obj) { this.__WB_source = obj; return this; } } +{ + let window = _____WB$wombat$assign$function_____("window"); + let self = _____WB$wombat$assign$function_____("self"); + let document = _____WB$wombat$assign$function_____("document"); + let location = _____WB$wombat$assign$function_____("location"); + let top = _____WB$wombat$assign$function_____("top"); + let parent = _____WB$wombat$assign$function_____("parent"); + let frames = _____WB$wombat$assign$function_____("frames"); + let opener = _____WB$wombat$assign$function_____("opener"); + +var pow2belowThreshold = 0.25 +var key_values={}; +key_values['poolName'] ={'name':'Pool Name','default':'newPool','description': 'Name of the pool in question. Typical pool names are included below.', 'width':'30%; text-align: left'}; +key_values['size'] ={'name':'Size','default': 3, 'description': 'Number of replicas the pool will have. Default value of 3 is pre-filled.', 'width':'10%', 'global':1}; +key_values['osdNum'] ={'name':'OSD #','default': 100, 'description': 'Number of OSDs which this Pool will have PGs in. Typically, this is the entire Cluster OSD count, but could be less based on CRUSH rules. (e.g. Separate SSD and SATA disk sets)', 'width':'10%', 'global':1}; +key_values['percData'] ={'name':'%Data', 'default': 5, 'description': 'This value represents the approximate percentage of data which will be contained in this pool for that specific OSD set. Examples are pre-filled below for guidance.','width':'10%'}; +key_values['targPGsPerOSD'] ={'name':'Target PGs per OSD', 'default':100, 'description': 'This value should be populated based on the following guidance:', 'width':'10%', 'global':1, 'options': [ ['100','If the cluster OSD count is not expected to increase in the foreseeable future.'], ['200', 'If the cluster OSD count is expected to increase (up to double the size) in the foreseeable future.']]} + +var notes ={ + 'totalPerc':'"Total Data Percentage" below table should be a multiple of 100%.', + 'totalPGs':'"Total PG Count" below table will be the count of Primary PG copies. However, when calculating total PGs per OSD average, you must include all copies.', + 'noDecrease':'It\'s also important to know that the PG count can be increased, but NEVER decreased without destroying / recreating the pool. 
However, increasing the PG Count of a pool is one of the most impactful events in a Ceph Cluster, and should be avoided for production clusters if possible.', +}; + +var presetTables={}; +presetTables['All-in-One']=[ + { 'poolName' : 'rbd', 'size' : '3', 'osdNum' : '100', 'percData' : '100', 'targPGsPerOSD' : '100'}, +]; +presetTables['OpenStack']=[ + { 'poolName' : 'cinder-backup', 'size' : '3', 'osdNum' : '100', 'percData' : '25', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'cinder-volumes', 'size' : '3', 'osdNum' : '100', 'percData' : '53', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'ephemeral-vms', 'size' : '3', 'osdNum' : '100', 'percData' : '15', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'glance-images', 'size' : '3', 'osdNum' : '100', 'percData' : '7', 'targPGsPerOSD' : '100'}, +]; +presetTables['OpenStack w RGW - Jewel and later']=[ + { 'poolName' : '.rgw.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.control', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.data.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.gc', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.intent-log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.meta', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.usage', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.keys', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.email', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.swift', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.uid', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.extra', 'size' : '3', 'osdNum' : '100', 'percData' : '1.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.index', 'size' : '3', 'osdNum' : '100', 'percData' : '3.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.data', 'size' : '3', 'osdNum' : '100', 'percData' : '19', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'cinder-backup', 'size' : '3', 'osdNum' : '100', 'percData' : '18', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'cinder-volumes', 'size' : '3', 'osdNum' : '100', 'percData' : '42.8', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'ephemeral-vms', 'size' : '3', 'osdNum' : '100', 'percData' : '10', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'glance-images', 'size' : '3', 'osdNum' : '100', 'percData' : '5', 'targPGsPerOSD' : '100'}, +]; + +presetTables['Rados Gateway Only - Jewel and later']=[ + { 'poolName' : '.rgw.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.control', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.data.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.gc', 'size' : '3', 'osdNum' : '100', 
'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.intent-log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.meta', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.usage', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.keys', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.email', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.swift', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.users.uid', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.extra', 'size' : '3', 'osdNum' : '100', 'percData' : '1.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.index', 'size' : '3', 'osdNum' : '100', 'percData' : '3.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'default.rgw.buckets.data', 'size' : '3', 'osdNum' : '100', 'percData' : '94.8', 'targPGsPerOSD' : '100'}, +]; + +presetTables['OpenStack w RGW - Infernalis and earlier']=[ + { 'poolName' : '.intent-log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets', 'size' : '3', 'osdNum' : '100', 'percData' : '18', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets.extra', 'size' : '3', 'osdNum' : '100', 'percData' : '1.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets.index', 'size' : '3', 'osdNum' : '100', 'percData' : '3.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.control', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.gc', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.usage', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.email', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.swift', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.uid', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'cinder-backup', 'size' : '3', 'osdNum' : '100', 'percData' : '19', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'cinder-volumes', 'size' : '3', 'osdNum' : '100', 'percData' : '42.9', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'ephemeral-vms', 'size' : '3', 'osdNum' : '100', 'percData' : '10', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'glance-images', 'size' : '3', 'osdNum' : '100', 'percData' : '5', 'targPGsPerOSD' : '100'}, +]; + +presetTables['Rados Gateway Only - Infernalis and earlier']=[ + { 'poolName' : '.intent-log', 'size' : '3', 'osdNum' : '100', 'percData' : 
'0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.log', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets', 'size' : '3', 'osdNum' : '100', 'percData' : '94.9', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets.extra', 'size' : '3', 'osdNum' : '100', 'percData' : '1.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.buckets.index', 'size' : '3', 'osdNum' : '100', 'percData' : '3.0', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.control', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.gc', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.rgw.root', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.usage', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.email', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.swift', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, + { 'poolName' : '.users.uid', 'size' : '3', 'osdNum' : '100', 'percData' : '0.1', 'targPGsPerOSD' : '100'}, +]; +presetTables['RBD and libRados']=[ + { 'poolName' : 'rbd', 'size' : '3', 'osdNum' : '100', 'percData' : '75', 'targPGsPerOSD' : '100'}, + { 'poolName' : 'myObjects', 'size' : '3', 'osdNum' : '100', 'percData' : '25', 'targPGsPerOSD' : '100'}, +]; + +$(function() { + $("#presetType").on("change",changePreset); + $("#btnAddPool").on("click",addPool); + $("#btnGenCommands").on("click",generateCommands); + $.each(presetTables,function(index,value) { + selIndex=''; + if ( index == 'OpenStack w RGW - Jewel and later' ) + selIndex=' selected'; + $("#presetType").append(""); + }); + changePreset(); + $("#beforeTable").html("
Key
"); + $.each(key_values, function(index, value) { + pre=''; + post=''; + if ('global' in value) { + pre=''; + post='' + } + + var dlAdd="
"+pre+value['name']+post+"
"+value['description']; + if ( 'options' in value ) { + dlAdd+="
"; + $.each(value['options'], function (subIndex, subValue) { + dlAdd+="
"+subValue[0]+"
"+subValue[1]+"
"; + }); + dlAdd+="
"; + } + dlAdd+="
"; + $("#keyDL").append(dlAdd); + }); + $("#afterTable").html("
Notes
"); + $.each(notes,function(index, value) { + $("#notesUL").append("\t
  • "+value+"
  • \n"); + }); + +}); + +function changePreset() { + resetTable(); + fillTable($("#presetType").val()); +} + +function resetTable() { + $("#pgsperpool").html(""); + $("#pgsperpool").append("\n\n"); + $("#headerRow").append("\t \n"); + var fieldCount=0; + var percDataIndex=0; + $.each(key_values, function(index, value) { + fieldCount++; + pre=''; + post=''; + var widthAdd=''; + if ( index == 'percData' ) + percDataIndex=fieldCount; + if ('width' in value) + widthAdd=' style=\'width: '+value['width']+'\''; + if ('global' in value) { + pre=''; + post='' + } + $("#headerRow").append("\t"+pre+value['name']+post+"\n"); + }); + percDataIndex++; + $("#headerRow").append("\tSuggested PG Count\n"); + $("#pgsperpool").append("Total Data Percentage: 0% PG Total Count: 0"); +} + +function nearestPow2( aSize ){ + var tmp=Math.pow(2, Math.round(Math.log(aSize)/Math.log(2))); + if(tmp<(aSize*(1-pow2belowThreshold))) + tmp*=2; + return tmp; +} + +function globalChange(field) { + dialogHTML='
    '; + dialogHTML+='
    \n'; + dialogHTML+=''; + dialogHTML+=''; + dialogHTML+=''; + dialogHTML+='
    '; + globalDialog=$(dialogHTML).dialog({ + autoOpen: true, + width: 350, + show: 'fold', + hide: 'fold', + modal: true, + buttons: { + "Update Value": function() { massUpdate($("#globalField").val(),$("#globalValue").val()); globalDialog.dialog("close"); setTimeout(function() { globalDialog.dialog("destroy"); }, 1000); }, + "Cancel": function() { globalDialog.dialog("close"); setTimeout(function() { globalDialog.dialog("destroy"); }, 1000); } + } + }); +} + +var rowCount=0; +function fillTable(presetType) { + rowCount=0; + $.each(presetTables[presetType], function(index,value) { + addTableRow(value); + }); +} + +function addPool() { + dialogHTML='
    '; + $.each(key_values, function(index,value) { + dialogHTML+='

    \n'; + classAdd='right'; + if ( index == 'poolName' ) + classAdd='left'; + dialogHTML+='
    '; + }); + dialogHTML+=''; + dialogHTML+='
    '; + addPoolDialog=$(dialogHTML).dialog({ + autoOpen: true, + width: 350, + show: 'fold', + hide: 'fold', + modal: true, + buttons: { + "Add Pool": function() { + var newPoolValues={}; + $.each(key_values,function(index,value) { + newPoolValues[index]=$("#new"+index).val(); + }); + addTableRow(newPoolValues); + addPoolDialog.dialog("close"); + setTimeout(function() { addPoolDialog.dialog("destroy"); }, 1000); }, + "Cancel": function() { addPoolDialog.dialog("close"); setTimeout(function() { addPoolDialog.dialog("destroy"); }, 1000); } + } + }); + +// addTableRow({'poolName':'newPool','size':3, 'osdNum':100,'targPGsPerOSD': 100, 'percData':0}); +} + +function addTableRow(rowValues) { + rowAdd="\n"; + rowAdd+="\t\n"; + $.each(key_values, function(index,value) { + classAdd=' center'; + modifier=''; + if ( index == 'percData' ) { + classAdd='" style="text-align: right;'; + // modifier=' %'; + } else if ( index == 'poolName' ) + classAdd=' left'; + rowAdd+="\t"+modifier+"\n"; + }); + rowAdd+="\t0"; + $("#totalRow").before(rowAdd); + updatePGCount(rowCount); + $("[id$='percData_input']").each(function() { var fieldVal=parseFloat($(this).val()); $(this).val(fieldVal.toFixed(2)); }); + rowCount++; +} + +function updatePGCount(rowID) { + if(rowID==-1) { + for(var i=0;icalcValue) + $("#row"+rowID+"_pgCount").html(minValue); + else + $("#row"+rowID+"_pgCount").html(calcValue); + } + updateTotals(); +} + +function focusMe(rowID,field) { + $("#row"+rowID+"_"+field+"_input").toggleClass('inputColor'); + $("#row"+rowID+"_"+field+"_input").toggleClass('highlightColor'); + $("#dt_"+field).toggleClass('highlightColor'); + $("#dd_"+field).toggleClass('highlightColor'); + updatePGCount(rowID); +} + +function blurMe(rowID,field) { + focusMe(rowID,field); + $("[id$='percData_input']").each(function() { var fieldVal=parseFloat($(this).val()); $(this).val(fieldVal.toFixed(2)); }); +} + +function keyMe(rowID,field) { + updatePGCount(rowID); +} + +function massUpdate(field,value) { + $("[id$='_"+field+"_input']").val(value); + key_values[field]['default']=value; + updatePGCount(-1); +} + +function updateTotals() { + var totalPerc=0; + var totalPGs=0; + $("[id$='percData_input']").each(function() { + totalPerc+=parseFloat($(this).val()); + if ( parseFloat($(this).val()) > 100 ) + $(this).addClass('ui-state-error'); + else + $(this).removeClass('ui-state-error'); + }); + $("[id$='_pgCount']").each(function() { + totalPGs+=parseInt($(this).html()); + }); + $("#percTotalValue").html(totalPerc.toFixed(2)); + $("#pgTotalValue").html(totalPGs); + if(parseFloat(totalPerc.toFixed(2)) % 100 != 0) { + $("#percTotalValue").addClass('ui-state-error'); + $("#li_totalPerc").addClass('ui-state-error'); + } else { + $("#percTotalValue").removeClass('ui-state-error'); + $("#li_totalPerc").removeClass('ui-state-error'); + } + $("#commandCode").html(""); +} + +function generateCommands() { + outputCommands="## Note: The 'while' loops below pause between pools to allow all\n\ +## PGs to be created. This is a safety mechanism to prevent\n\ +## saturating the Monitor nodes.\n\ +## -------------------------------------------------------------------\n\n"; + for(i=0;i + + + + + + + + + + + +
Ceph PGs per Pool Calculator

Instructions
1. Confirm your understanding of the fields by reading through the Key below.
2. Select a "Ceph Use Case" from the drop down menu.
3. Adjust the values in the "Green" shaded fields below.
   Tip: Headers can be clicked to change the value throughout the table.
4. You will see the Suggested PG Count update based on your inputs.
5. Click the "Add Pool" button to create a new line for a new pool.
6. Click the icon to delete the specific Pool.
7. For more details on the logic used and some important details, see the area below the table.
8. Once all values have been adjusted, click the "Generate Commands" button to get the pool creation commands (a sketch of the generated output follows this list).
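
To make step 8 concrete, the sketch below mirrors the shape of the
generateCommands() function in pgcalc.js above. The per-pool command strings
emitted by the real tool are truncated in this capture, so the
"ceph osd pool create" / "ceph osd pool set" lines and the pause loop shown
here are illustrative assumptions rather than the tool's exact output;
buildPoolCommands() and its pools argument are hypothetical names.

    // Illustrative sketch only -- the exact strings generateCommands() emits
    // are truncated in this capture. "pools" stands in for the table rows.
    function buildPoolCommands(pools) {
        var out = "## Note: pause between pools so all PGs finish creating\n" +
                  "## before the next pool is created.\n\n";
        pools.forEach(function (p) {
            out += "ceph osd pool create " + p.poolName + " " + p.pgCount + "\n";
            out += "ceph osd pool set " + p.poolName + " size " + p.size + "\n";
            // Assumed pause loop (the safety mechanism described in the note):
            out += "while [ $(ceph -s | grep -c creating) -gt 0 ]; do sleep 1; done\n\n";
        });
        return out;
    }

    // Example:
    //   buildPoolCommands([{ poolName: "rbd", pgCount: 4096, size: 3 }]);
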
[calculator table, "Add Pool" / "Generate Commands" buttons, and generated-command output area]
Logic behind Suggested PG Count

                        ( Target PGs per OSD ) x ( OSD # ) x ( %Data )
   Suggested PG Count = -----------------------------------------------
                                          ( Size )

1. If the value of the above calculation is less than the value of
   ( OSD# ) / ( Size ), then the value is updated to the value of
   ( OSD# ) / ( Size ). This is to ensure even load / data distribution by
   allocating at least one Primary or Secondary PG to every OSD for every Pool.
2. The output value is then rounded to the nearest power of 2.
   Tip: The nearest power of 2 provides a marginal improvement in efficiency
   of the CRUSH algorithm.
3. If the nearest power of 2 is more than 25% below the original value, the
   next higher power of 2 is used (see the sketch after this list).
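
As a condensed restatement of the rules above (cf. nearestPow2() and
pow2belowThreshold in pgcalc.js), the sketch below computes the suggested
count. The function and argument names are ours, and %Data is passed as a
percentage (e.g. 25 for 25%).

    // Condensed restatement of the suggested-PG-count logic described above.
    function suggestedPGCount(targPGsPerOSD, osdNum, percData, size) {
        var raw = targPGsPerOSD * osdNum * (percData / 100) / size;
        // Floor: at least one PG copy on every OSD for this pool.
        var minValue = osdNum / size;
        if (raw < minValue)
            raw = minValue;
        // Round to the nearest power of 2 ...
        var pow2 = Math.pow(2, Math.round(Math.log(raw) / Math.log(2)));
        // ... but never land more than 25% below the pre-rounded value.
        if (pow2 < raw * (1 - 0.25))
            pow2 *= 2;
        return pow2;
    }

    // Example: 100 target PGs per OSD, 100 OSDs, 25% of the data, size 3
    //   raw = 100 * 100 * 0.25 / 3 = 833.33..., nearest power of 2 = 1024
    console.log(suggestedPGCount(100, 100, 25, 3)); // 1024
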
Objective

• The objective of this calculation and the target ranges noted in the "Key"
  section above are to ensure that there are sufficient Placement Groups for
  even data distribution throughout the cluster, while not going high enough
  on the PG per OSD ratio to cause problems during Recovery and/or Backfill
  operations.

Effects of empty or non-active pools:

• Empty or otherwise non-active pools should not be considered helpful toward
  even data distribution throughout the cluster.
• However, the PGs associated with these empty / non-active pools still
  consume memory and CPU overhead.
diff --git a/doc/rados/operations/placement-groups.rst b/doc/rados/operations/placement-groups.rst
index 84dad884e22..3875f7cc814 100644
--- a/doc/rados/operations/placement-groups.rst
+++ b/doc/rados/operations/placement-groups.rst
@@ -647,6 +647,8 @@ more time for peering.
 
 Setting the Number of PGs
 =========================
 
+:ref:`Placement Group Link `
+
 Setting the initial number of PGs in a pool must be done at the time you
 create the pool. See `Create a Pool`_ for details.