Ovirt程序员

【Ovirt 笔记】群集策略

2017-06-15  本文已影响18人  58bc06151329

分析整理的版本为 Ovirt 3.4.5 版本。

群集策略

过滤器模块

过滤器是运行虚拟机的硬约束,是虚拟机能够运行的最小要求。

PinToHost 过滤器

new PinToHostPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Only pinned VMs are restricted by this unit; anything else may run anywhere.
    if (vm.getMigrationSupport() != MigrationSupport.PINNED_TO_HOST) {
        return hosts;
    }

    if (vm.getDedicatedVmForVds() != null) {
        // A dedicated host was configured: accept it if it is among the candidates.
        for (VDS candidate : hosts) {
            if (candidate.getId().equals(vm.getDedicatedVmForVds())) {
                return Arrays.asList(candidate);
            }
        }
    } else if (vm.getRunOnVds() == null) {
        // "Pin to any": the VM is down and not migratable, so any candidate works.
        return hosts;
    }

    // The VM is pinned but its dedicated host is unavailable: report and return no hosts.
    // added by zb for DTCLOUD-320
    AuditLogableBase logable = new AuditLogableBase();
    logable.addCustomValue("policyName", "PinToHost"); // the VM must run on its pinned host
    AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    return new ArrayList<>();
}

CPU 过滤器

new CPUPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Hard constraint: keep only hosts whose effective core count can
    // accommodate the VM's configured number of CPUs.
    List<VDS> list = new ArrayList<VDS>();
    for (VDS vds : hosts) {
        Integer cores = SlaValidator.getInstance().getEffectiveCpuCores(vds);
        // A null core count means the host did not report it; such hosts pass through.
        if (cores != null && vm.getNumOfCpus() > cores) {
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_CPUS.toString());
            log.debugFormat("host {0} has less cores ({1}) than vm cores ({2})",
                    vds.getName(),
                    cores,
                    vm.getNumOfCpus());
            continue;
        }
        list.add(vds);
    }
    // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
    // 'list' is never null at this point, so isEmpty() is the correct check.
    if (list.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "CPU");
        AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return list;
}

Memory 过滤器

new MemoryPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    List<VDS> list = new ArrayList<>();
    // If Vm in Paused mode - no additional memory allocation needed
    if (vm.getStatus() == VMStatus.Paused) {
        return hosts;
    }
    for (VDS vds : hosts) {
        // Reject hosts whose swap configuration is out of the allowed range.
        if (!isVMSwapValueLegal(vds)) {
            log.debugFormat("host '{0}' swap value is illegal", vds.getName());
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_SWAP.toString());
            continue;
        }
        // Reject hosts that cannot provide the memory this VM requires.
        if (!memoryChecker.evaluate(vds, vm)) {
            log.debugFormat("host '{0}' has insufficient memory to run the VM", vds.getName());
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_MEMORY.toString());
            continue;
        }
        list.add(vds);
    }
    // added by zb for DTCLOUD-320: audit when no host satisfies the policy.
    // 'list' is never null at this point, so isEmpty() is the correct check.
    if (list.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "\u5185\u5b58"); // "\u5185\u5b58" = "memory" (Chinese)
        AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return list;
}

Network 过滤器

new NetworkPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // No candidates at all: preserve the original contract of returning null.
    if (hosts == null || hosts.isEmpty()) {
        return null;
    }

    List<VDS> toRemoveHostList = new ArrayList<VDS>();
    List<VmNetworkInterface> vmNICs = getVmNetworkInterfaceDao().getAllForVm(vm.getId());
    Guid clusterId = hosts.get(0).getVdsGroupId();
    List<Network> clusterNetworks = getNetworkDAO().getAllForCluster(clusterId);
    Map<String, Network> networksByName = Entities.entitiesByName(clusterNetworks);
    Map<Guid, List<String>> hostNics = getInterfaceDAO().getHostNetworksByCluster(clusterId);
    Network displayNetwork = NetworkUtils.getDisplayNetwork(clusterNetworks);
    Map<Guid, VdsNetworkInterface> hostDisplayNics = getDisplayNics(displayNetwork);

    // Drop every host missing a network required by the VM's NICs or by the
    // cluster's display network.
    for (VDS host : hosts) {
        ValidationResult result =
                validateRequiredNetworksAvailable(host,
                        vm,
                        vmNICs,
                        displayNetwork,
                        networksByName,
                        hostNics.get(host.getId()),
                        hostDisplayNics.get(host.getId()));

        if (!result.isValid()) {
            messages.add(result.getMessage().name());
            if (result.getVariableReplacements() != null) {
                messages.addAll(result.getVariableReplacements());
            }

            toRemoveHostList.add(host);
        }
    }
    hosts.removeAll(toRemoveHostList);
    // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
    // 'hosts' cannot be null here (guarded above), so isEmpty() suffices.
    if (hosts.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "\u7f51\u7edc"); // "\u7f51\u7edc" = "network" (Chinese)
        AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return hosts;
}

HA 过滤器

new HostedEngineHAClusterFilterPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Non-hosted-engine VMs are not constrained by HA scores.
    if (!vm.isHostedEngine()) {
        return hosts;
    }

    List<VDS> eligible = new ArrayList<VDS>();
    for (VDS candidate : hosts) {
        int score = candidate.getHighlyAvailableScore();
        if (score > 0) {
            eligible.add(candidate);
            log.debugFormat("Host {0} wasn't filtered out as it has a score of {1}",
                    candidate.getName(),
                    score);
        } else {
            log.debugFormat("Host {0} was filtered out as it doesn't have a positive score (the score is {1})", candidate.getName(), score);
        }
    }

    // No host with a positive HA score: record the failure and audit it.
    if (eligible.isEmpty()) {
        messages.add(VdcBllMessages.ACTION_TYPE_FAILED_NO_HA_VDS.name());
        // added by zb for DTCLOUD-320
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "HA");
        AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }

    return eligible;
}

CPU-Level 过滤器

new CpuLevelFilterPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    boolean filteredOutHosts = false;
    // Only filter when the VM was started with a concrete CPU level.
    if (StringUtils.isNotEmpty(vm.getCpuName())) {
        List<VDS> hostsToRunOn = new ArrayList<VDS>();
        for (VDS host : hosts) {
            String hostCpuName = CpuFlagsManagerHandler.FindMaxServerCpuByFlags(host.getCpuFlags(), host.getVdsGroupCompatibilityVersion()).getCpuName();
            // Hosts whose CPU could not be identified are implicitly filtered out.
            if (StringUtils.isNotEmpty(hostCpuName)) {
                // compareResult <= 0 means the VM's CPU level is at or below the host's.
                int compareResult = CpuFlagsManagerHandler.compareCpuLevels(vm.getCpuName(), hostCpuName, vm.getVdsGroupCompatibilityVersion());
                if (compareResult <= 0) {
                    hostsToRunOn.add(host);
                    log.debugFormat("Host {0} wasn't filtered out as it has a CPU level ({1}) which is higher or equal than the CPU level the VM was run with ({2})",
                            host.getName(),
                            hostCpuName,
                            vm.getCpuName());
                } else {
                    log.debugFormat("Host {0} was filtered out as it has a CPU level ({1}) which is lower than the CPU level the VM was run with ({2})",
                            host.getName(),
                            hostCpuName,
                            vm.getCpuName());
                    filteredOutHosts = true;
                }
            }
        }

        if (filteredOutHosts) {
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_CPU_LEVEL.toString());
        }

        // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
        // 'hostsToRunOn' is never null at this point, so isEmpty() is the correct check.
        if (hostsToRunOn.isEmpty()) {
            AuditLogableBase logable = new AuditLogableBase();
            logable.addCustomValue("policyName", "CPU\u7ea7\u522b"); // "CPU\u7ea7\u522b" = "CPU level"
            AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
        }
        return hostsToRunOn;
    } else {
        return hosts;
    }
}

VmAffinityGroups 过滤器

new VmAffinityFilterPolicyUnit(policyUnit);
// Hard affinity filtering: delegates to getAcceptableHosts with the first
// argument true, i.e. affinity-group constraints are treated as mandatory.
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    return getAcceptableHosts(true, hosts, vm, messages);
}

权重模块

权重代表对运行虚拟机的软约束,策略使用到的权重模块分数最终会被统计,最低权重值的主机会被调度程序选择。如果 群集 的群集策略中选中 为速度进行优化,且多于 10 个请求时跳过权重计算,否则将进行权重计算,权重计算会使用 配置 中的 Weights模块 权重模块。

公用的权重模块

策略 None、Power_Saving、Evenly_Distributed、Vm_Evenly_Distributed,共同使用的权重模块有以下几种:

HA 权重模块的分数算法

new HostedEngineHAClusterWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Hosts with higher HA scores get lower (better) weights for the
    // hosted-engine VM; other VMs get the default score.
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();
    if (!vm.isHostedEngine()) {
        fillDefaultScores(hosts, scores);
        return scores;
    }

    // Normalize only when HA scores can exceed the scheduler's weight range.
    float ratio = 1;
    if (MAXIMUM_HA_SCORE > MaxSchedulerWeight) {
        ratio = (float) MaxSchedulerWeight / MAXIMUM_HA_SCORE;
    }
    for (VDS host : hosts) {
        int weight = MaxSchedulerWeight - Math.round(host.getHighlyAvailableScore() * ratio);
        scores.add(new Pair<Guid, Integer>(host.getId(), weight));
    }
    return scores;
}

VmAffinityGroups 权重模块的分数算法

new VmAffinityWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Reuse the affinity filter with a soft constraint: hosts it accepts are preferred.
    List<VDS> acceptable =
            VmAffinityFilterPolicyUnit.getAcceptableHosts(false,
                    hosts,
                    vm,
                    new ArrayList<String>());

    Map<Guid, VDS> acceptableById = new HashMap<Guid, VDS>();
    if (acceptable != null) {
        for (VDS host : acceptable) {
            acceptableById.put(host.getId(), host);
        }
    }

    // Preferred hosts get the minimal weight (1); all others the maximal one.
    List<Pair<Guid, Integer>> weights = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int weight = acceptableById.containsKey(host.getId()) ? 1 : MaxSchedulerWeight;
        weights.add(new Pair<Guid, Integer>(host.getId(), weight));
    }

    return weights;
}
关系组属性 说明
Positive 虚拟机要运行在同一主机上
Negative 虚拟机要运行在不同的主机上
Hard 不考虑外部条件强制执行
Soft 在外部条件允许的条件下尽量执行

OptimalForHaReservation 权重模块的分数算法

new HaReservationWeightPolicyUnit(policyUnit);
// Weights hosts by how many HA VMs they already run (fitted to 0..RATIO_FACTOR,
// then optionally scaled down), so that HA VMs spread across the cluster.
// Falls back to default scores for non-HA VMs or clusters without HA reservation.
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {

    log.info("Started HA reservation scoring method");
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();

    Map<Guid, Integer> hostsHaVmCount = new HashMap<Guid, Integer>();

    // If the vm is not HA or the cluster is not marked as HA Reservation set default score.
    VDSGroup vdsGroup = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());

    if (!vm.isAutoStartup() || !vdsGroup.supportsHaReservation()) {
        fillDefaultScores(hosts, scores);
    } else {
        // Use a single call to the DB to retrieve all VM in the Cluster and map them by Host id
        Map<Guid, List<VM>> hostId2HaVmMapping = HaReservationHandling.mapHaVmToHostByCluster(vdsGroup.getId());

        // Count the HA VMs on each host and track the cluster-wide maximum.
        int maxCount = 0;
        for (VDS host : hosts) {

            int count = 0;
            if (hostId2HaVmMapping.containsKey(host.getId())) {
                count = hostId2HaVmMapping.get(host.getId()).size();
            }
            maxCount = (maxCount < count) ? count : maxCount;
            hostsHaVmCount.put(host.getId(), count);
        }

        // Fit count to scale of 0 to RATIO_FACTOR
        if (maxCount > 0) {
            for (VDS host : hosts) {
                int fittedCount =
                        Math.round(hostsHaVmCount.get(host.getId()).floatValue() / maxCount * RATIO_FACTOR);
                hostsHaVmCount.put(host.getId(), fittedCount);
            }
        }

        // Get scale down param
        // NOTE(review): Integer.parseInt throws NumberFormatException on a
        // malformed "ScaleDown" value - confirm it is validated upstream.
        Integer scaleDownParameter = 1;
        if (parameters.get("ScaleDown") != null) {
            scaleDownParameter = Integer.parseInt(parameters.get("ScaleDown"));
        } else {
            scaleDownParameter = Config.<Integer> getValue(ConfigValues.ScaleDownForHaReservation);
        }

        // Set the score pairs
        for (VDS host : hosts) {
            // Scale down if needed
            int haCount = hostsHaVmCount.get(host.getId());
            haCount = (int) Math.ceil(haCount / scaleDownParameter.floatValue());

            scores.add(new Pair<Guid, Integer>(host.getId(), haCount));

            log.infoFormat("Score for host:{0} is {1}", host.getName(), haCount);
        }

    }

    return scores;
}
int maxCount = 0;
for (VDS host : hosts) {

    int count = 0;
    if (hostId2HaVmMapping.containsKey(host.getId())) {
        count = hostId2HaVmMapping.get(host.getId()).size();
    }
    maxCount = (maxCount < count) ? count : maxCount;
    hostsHaVmCount.put(host.getId(), count);
}

// Fit count to scale of 0 to RATIO_FACTOR
if (maxCount > 0) {
    for (VDS host : hosts) {
        int fittedCount =
                Math.round(hostsHaVmCount.get(host.getId()).floatValue() / maxCount * RATIO_FACTOR);
        hostsHaVmCount.put(host.getId(), fittedCount);
    }
}

不同的权重模块

OptimalForEvenDistribution 权重模块的分数算法(主要应用于 Evenly_Distributed 群集策略)

new EvenDistributionWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Whether hyper-threads count as cores is a cluster-level setting.
    VDSGroup vdsGroup = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());
    boolean countThreadsAsCores = vdsGroup == null ? false : vdsGroup.getCountThreadsAsCores();

    List<Pair<Guid, Integer>> result = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int weight = calcEvenDistributionScore(host, vm, countThreadsAsCores);
        result.add(new Pair<Guid, Integer>(host.getId(), weight));
    }
    return result;
}
return (hostCpu / vcpu) + (pendingVcpus + vm.getNumOfCpus() + spmCpu) / hostCores;
public static Integer getEffectiveCpuCores(VDS vds, boolean countThreadsAsCores) {
    // When the cluster counts threads as cores, prefer the thread count
    // if the host reported one; otherwise fall back to physical cores.
    Integer threads = vds.getCpuThreads();
    if (countThreadsAsCores && threads != null) {
        return threads;
    }
    return vds.getCpuCores();
}

None 权重模块的分数算法(主要应用于 None 群集策略)

new NoneWeightPolicyUnit(policyUnit);
// The "None" policy adds no weighting of its own; it simply returns the
// scores produced by the base implementation.
@Override
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    return super.score(hosts, vm, parameters);
}

OptimalForPowerSaving 权重模块的分数算法(主要应用于 Power_Saving 群集策略)

new PowerSavingWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Power saving inverts even distribution: hosts already running VMs get a
    // lower (better) weight, concentrating load so idle hosts can be powered down.
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();
    VDSGroup vdsGroup = null; // looked up lazily, only if some host runs VMs
    for (VDS host : hosts) {
        int score = MaxSchedulerWeight - 1;
        if (host.getVmCount() > 0) {
            if (vdsGroup == null) {
                vdsGroup = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());
            }
            boolean countThreadsAsCores = vdsGroup == null ? false : vdsGroup.getCountThreadsAsCores();
            score -= calcEvenDistributionScore(host, vm, countThreadsAsCores);
        }
        scores.add(new Pair<Guid, Integer>(host.getId(), score));
    }
    return scores;
}

OptimalForEvenGuestDistribution 权重模块的分数算法(主要应用于 VM_Evenly_Distributed 群集策略)

new EvenGuestDistributionWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Weight each host by the number of VM slots it already occupies.
    List<Pair<Guid, Integer>> weights = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int weight = calcEvenGuestDistributionScore(host, parameters);
        weights.add(new Pair<Guid, Integer>(host.getId(), weight));
    }
    return weights;
}
private int getOccupiedVmSlots(VDS vds, Map<String, String> parameters) {
    // Slots already taken on this host: the active VM count plus, for the SPM
    // host, a configurable grace so the SPM runs fewer VMs than its peers.
    int occupiedSlots = vds.getVmActive();
    // Local value, not a constant: lowerCamelCase name and braced 'if' per convention.
    final int spmVmCountGrace = NumberUtils.toInt(parameters.get("SpmVmGrace"),
            spmVmGrace);
    if (vds.isSpm()) {
        occupiedSlots += spmVmCountGrace;
    }

    return occupiedSlots;
}

private int calcEvenGuestDistributionScore(VDS vds, Map<String, String> parameters) {
    // The weight is simply the occupied slot count, floored at zero.
    int occupied = getOccupiedVmSlots(vds, parameters);
    return occupied < 0 ? 0 : occupied;
}

负载平衡器

None 负载平衡器

new NoneBalancePolicyUnit(policyUnit);
// The "None" policy performs no load balancing.
@Override
public Pair<List<Guid>, Guid> balance(VDSGroup cluster,
        List<VDS> hosts,
        Map<String, String> parameters,
        ArrayList<String> messages) {
    // Returning null signals the caller that there is no migration to schedule.
    return null;
}

OptimalForPowerSaving 负载平衡器

只在一部分主机中分配 CPU 处理负载,这可以为其它主机节省能源消耗。如果主机上的 CPU 负载处于低使用率状态,并且处于这个状态的时间超过了预先定义的时间,这个主机上的虚拟机将被迁移到其它主机上,并在迁移后关闭这个主机。如果一个主机的利用率已经达到了设置的最高值,新加入到这个主机的虚拟机将不会被启动。

参数 说明
CpuOverCommitDurationMinutes 在群集策略起作用前,一个主机可以在 CPU 负载超过利用率中所设置的值的情况下运行的 最长 时间(以分钟为单位)。通过这个设置,可以避免因为暂时的高 CPU 负载所带来的不必要的虚拟机迁移操作,默认获取系统参数 CpuOverCommitDurationMinutes 的值,默认值是 2 分钟。
HighUtilization 最高利用率,一个百分比值,默认获取系统参数 HighUtilizationForPowerSave 的值,默认值为 10
LowUtilization 最低利用率,一个百分比值,默认获取系统参数 LowUtilizationForPowerSave 的值,默认值为 6
return new PowerSavingBalancePolicyUnit(policyUnit);
// Extends the parent balancing logic with power management: after computing
// the usual migration rule, it may decide to power a host up or down.
public Pair<List<Guid>, Guid> balance(VDSGroup cluster,
        List<VDS> hosts,
        Map<String, String> parameters,
        ArrayList<String> messages) {
    // The migration decision itself comes from the parent implementation.
    final Pair<List<Guid>, Guid> migrationRule =  super.balance(cluster, hosts, parameters, messages);

    List<VDS> allHosts = DbFacade.getInstance().getVdsDao().getAllForVdsGroup(cluster.getId());

    // Partition the cluster's hosts by their power-management relevant state.
    List<VDS> emptyHosts = new ArrayList<>();
    List<VDS> maintenanceHosts = new ArrayList<>();
    List<VDS> downHosts = new ArrayList<>();

    getHostLists(allHosts, parameters, emptyHosts, maintenanceHosts, downHosts);

    // Decide whether some host should be started or shut down (may be null = no action).
    Pair<VDS, VDSStatus> action = evaluatePowerManagementSituation(
            cluster,
            downHosts,
            maintenanceHosts,
            emptyHosts,
            parameters);

    if (action != null) {
        processPmAction(action);
    }

    return migrationRule;
}

OptimalForEvenDistribution 负载平衡器

new EvenDistributionBalancePolicyUnit(policyUnit);

OptimalForEvenGuestDistribution 负载平衡器

new EvenGuestDistributionBalancePolicyUnit(policyUnit);
参数 说明
SpmVmGrace 该变量定义了 SPM 主机究竟比其他主机运行的虚拟机少多少,默认获取系统参数 SpmVmGraceForEvenGuestDistribute 的值,默认值为 5
MigrationThreshold 在虚拟机从主机迁移之前,定义一个缓冲区。它最大限度的包容了最高利用率主机和最低利用率主机在虚拟机数量上的不同。当在群集中的每台主机都有一个虚拟机计数在值内时,该群集是均衡的,默认系统参数 MigrationThresholdForEvenGuestDistribute 的值,默认值为:5
HighVmCount 设置可在每台服务器运行虚拟机的最大数目。超过此限制限定服务器重载,默认系统参数 HighVmCountForEvenGuestDistribute 的值,默认值为:10

分析整理的版本为 Ovirt 3.4.5 版本。

群集策略

过滤器模块

过滤器是运行虚拟机的硬约束,是虚拟机能够运行的最小要求。

PinToHost 过滤器

new PinToHostPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Only pinned VMs are restricted by this unit; anything else may run anywhere.
    if (vm.getMigrationSupport() != MigrationSupport.PINNED_TO_HOST) {
        return hosts;
    }

    if (vm.getDedicatedVmForVds() != null) {
        // A dedicated host was configured: accept it if it is among the candidates.
        for (VDS candidate : hosts) {
            if (candidate.getId().equals(vm.getDedicatedVmForVds())) {
                return Arrays.asList(candidate);
            }
        }
    } else if (vm.getRunOnVds() == null) {
        // "Pin to any": the VM is down and not migratable, so any candidate works.
        return hosts;
    }

    // The VM is pinned but its dedicated host is unavailable: report and return no hosts.
    // added by zb for DTCLOUD-320
    AuditLogableBase logable = new AuditLogableBase();
    logable.addCustomValue("policyName", "PinToHost"); // the VM must run on its pinned host
    AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    return new ArrayList<>();
}

CPU 过滤器

new CPUPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Hard constraint: keep only hosts whose effective core count can
    // accommodate the VM's configured number of CPUs.
    List<VDS> list = new ArrayList<VDS>();
    for (VDS vds : hosts) {
        Integer cores = SlaValidator.getInstance().getEffectiveCpuCores(vds);
        // A null core count means the host did not report it; such hosts pass through.
        if (cores != null && vm.getNumOfCpus() > cores) {
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_CPUS.toString());
            log.debugFormat("host {0} has less cores ({1}) than vm cores ({2})",
                    vds.getName(),
                    cores,
                    vm.getNumOfCpus());
            continue;
        }
        list.add(vds);
    }
    // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
    // 'list' is never null at this point, so isEmpty() is the correct check.
    if (list.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "CPU");
        AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return list;
}

Memory 过滤器

new MemoryPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    List<VDS> list = new ArrayList<>();
    // If Vm in Paused mode - no additional memory allocation needed
    if (vm.getStatus() == VMStatus.Paused) {
        return hosts;
    }
    for (VDS vds : hosts) {
        // Reject hosts whose swap configuration is out of the allowed range.
        if (!isVMSwapValueLegal(vds)) {
            log.debugFormat("host '{0}' swap value is illegal", vds.getName());
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_SWAP.toString());
            continue;
        }
        // Reject hosts that cannot provide the memory this VM requires.
        if (!memoryChecker.evaluate(vds, vm)) {
            log.debugFormat("host '{0}' has insufficient memory to run the VM", vds.getName());
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_MEMORY.toString());
            continue;
        }
        list.add(vds);
    }
    // added by zb for DTCLOUD-320: audit when no host satisfies the policy.
    // 'list' is never null at this point, so isEmpty() is the correct check.
    if (list.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "\u5185\u5b58"); // "\u5185\u5b58" = "memory" (Chinese)
        AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return list;
}

Network 过滤器

new NetworkPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // No candidates at all: preserve the original contract of returning null.
    if (hosts == null || hosts.isEmpty()) {
        return null;
    }

    List<VDS> toRemoveHostList = new ArrayList<VDS>();
    List<VmNetworkInterface> vmNICs = getVmNetworkInterfaceDao().getAllForVm(vm.getId());
    Guid clusterId = hosts.get(0).getVdsGroupId();
    List<Network> clusterNetworks = getNetworkDAO().getAllForCluster(clusterId);
    Map<String, Network> networksByName = Entities.entitiesByName(clusterNetworks);
    Map<Guid, List<String>> hostNics = getInterfaceDAO().getHostNetworksByCluster(clusterId);
    Network displayNetwork = NetworkUtils.getDisplayNetwork(clusterNetworks);
    Map<Guid, VdsNetworkInterface> hostDisplayNics = getDisplayNics(displayNetwork);

    // Drop every host missing a network required by the VM's NICs or by the
    // cluster's display network.
    for (VDS host : hosts) {
        ValidationResult result =
                validateRequiredNetworksAvailable(host,
                        vm,
                        vmNICs,
                        displayNetwork,
                        networksByName,
                        hostNics.get(host.getId()),
                        hostDisplayNics.get(host.getId()));

        if (!result.isValid()) {
            messages.add(result.getMessage().name());
            if (result.getVariableReplacements() != null) {
                messages.addAll(result.getVariableReplacements());
            }

            toRemoveHostList.add(host);
        }
    }
    hosts.removeAll(toRemoveHostList);
    // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
    // 'hosts' cannot be null here (guarded above), so isEmpty() suffices.
    if (hosts.isEmpty()) {
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "\u7f51\u7edc"); // "\u7f51\u7edc" = "network" (Chinese)
        AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }
    return hosts;
}

HA 过滤器

new HostedEngineHAClusterFilterPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    // Non-hosted-engine VMs are not constrained by HA scores.
    if (!vm.isHostedEngine()) {
        return hosts;
    }

    List<VDS> eligible = new ArrayList<VDS>();
    for (VDS candidate : hosts) {
        int score = candidate.getHighlyAvailableScore();
        if (score > 0) {
            eligible.add(candidate);
            log.debugFormat("Host {0} wasn't filtered out as it has a score of {1}",
                    candidate.getName(),
                    score);
        } else {
            log.debugFormat("Host {0} was filtered out as it doesn't have a positive score (the score is {1})", candidate.getName(), score);
        }
    }

    // No host with a positive HA score: record the failure and audit it.
    if (eligible.isEmpty()) {
        messages.add(VdcBllMessages.ACTION_TYPE_FAILED_NO_HA_VDS.name());
        // added by zb for DTCLOUD-320
        AuditLogableBase logable = new AuditLogableBase();
        logable.addCustomValue("policyName", "HA");
        AuditLogDirector.log(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
    }

    return eligible;
}

CPU-Level 过滤器

new CpuLevelFilterPolicyUnit(policyUnit);
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    boolean filteredOutHosts = false;
    // Only filter when the VM was started with a concrete CPU level.
    if (StringUtils.isNotEmpty(vm.getCpuName())) {
        List<VDS> hostsToRunOn = new ArrayList<VDS>();
        for (VDS host : hosts) {
            String hostCpuName = CpuFlagsManagerHandler.FindMaxServerCpuByFlags(host.getCpuFlags(), host.getVdsGroupCompatibilityVersion()).getCpuName();
            // Hosts whose CPU could not be identified are implicitly filtered out.
            if (StringUtils.isNotEmpty(hostCpuName)) {
                // compareResult <= 0 means the VM's CPU level is at or below the host's.
                int compareResult = CpuFlagsManagerHandler.compareCpuLevels(vm.getCpuName(), hostCpuName, vm.getVdsGroupCompatibilityVersion());
                if (compareResult <= 0) {
                    hostsToRunOn.add(host);
                    log.debugFormat("Host {0} wasn't filtered out as it has a CPU level ({1}) which is higher or equal than the CPU level the VM was run with ({2})",
                            host.getName(),
                            hostCpuName,
                            vm.getCpuName());
                } else {
                    log.debugFormat("Host {0} was filtered out as it has a CPU level ({1}) which is lower than the CPU level the VM was run with ({2})",
                            host.getName(),
                            hostCpuName,
                            vm.getCpuName());
                    filteredOutHosts = true;
                }
            }
        }

        if (filteredOutHosts) {
            messages.add(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_CPU_LEVEL.toString());
        }

        // added by zb for DTCLOUD-320: alert when no host satisfies the policy.
        // 'hostsToRunOn' is never null at this point, so isEmpty() is the correct check.
        if (hostsToRunOn.isEmpty()) {
            AuditLogableBase logable = new AuditLogableBase();
            logable.addCustomValue("policyName", "CPU\u7ea7\u522b"); // "CPU\u7ea7\u522b" = "CPU level"
            AlertDirector.Alert(logable, AuditLogType.NO_HOSTS_SATISFY_POLICY);
        }
        return hostsToRunOn;
    } else {
        return hosts;
    }
}

VmAffinityGroups 过滤器

new VmAffinityFilterPolicyUnit(policyUnit);
// Hard affinity filtering: delegates to getAcceptableHosts with the first
// argument true, i.e. affinity-group constraints are treated as mandatory.
public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> parameters, List<String> messages) {
    return getAcceptableHosts(true, hosts, vm, messages);
}

权重模块

权重代表对运行虚拟机的软约束,策略使用到的权重模块分数最终会被统计,最低权重值的主机会被调度程序选择。

公用的权重模块

策略 None、Power_Saving、Evenly_Distributed、Vm_Evenly_Distributed,共同使用的权重模块有以下几种:

HA 权重模块的分数算法

new HostedEngineHAClusterWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Hosts with higher HA scores get lower (better) weights for the
    // hosted-engine VM; other VMs get the default score.
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();
    if (!vm.isHostedEngine()) {
        fillDefaultScores(hosts, scores);
        return scores;
    }

    // Normalize only when HA scores can exceed the scheduler's weight range.
    float ratio = 1;
    if (MAXIMUM_HA_SCORE > MaxSchedulerWeight) {
        ratio = (float) MaxSchedulerWeight / MAXIMUM_HA_SCORE;
    }
    for (VDS host : hosts) {
        int weight = MaxSchedulerWeight - Math.round(host.getHighlyAvailableScore() * ratio);
        scores.add(new Pair<Guid, Integer>(host.getId(), weight));
    }
    return scores;
}

VmAffinityGroups 权重模块的分数算法

new VmAffinityWeightPolicyUnit(policyUnit);
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Reuse the affinity filter with a soft constraint: hosts it accepts are preferred.
    List<VDS> acceptable =
            VmAffinityFilterPolicyUnit.getAcceptableHosts(false,
                    hosts,
                    vm,
                    new ArrayList<String>());

    Map<Guid, VDS> acceptableById = new HashMap<Guid, VDS>();
    if (acceptable != null) {
        for (VDS host : acceptable) {
            acceptableById.put(host.getId(), host);
        }
    }

    // Preferred hosts get the minimal weight (1); all others the maximal one.
    List<Pair<Guid, Integer>> weights = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int weight = acceptableById.containsKey(host.getId()) ? 1 : MaxSchedulerWeight;
        weights.add(new Pair<Guid, Integer>(host.getId(), weight));
    }

    return weights;
}

OptimalForHaReservation 权重模块的分数算法

new HaReservationWeightPolicyUnit(policyUnit);
// Weights hosts by how many HA VMs they already run (fitted to 0..RATIO_FACTOR,
// then optionally scaled down), so that HA VMs spread across the cluster.
// Falls back to default scores for non-HA VMs or clusters without HA reservation.
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {

    log.info("Started HA reservation scoring method");
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();

    Map<Guid, Integer> hostsHaVmCount = new HashMap<Guid, Integer>();

    // If the vm is not HA or the cluster is not marked as HA Reservation set default score.
    VDSGroup vdsGroup = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());

    if (!vm.isAutoStartup() || !vdsGroup.supportsHaReservation()) {
        fillDefaultScores(hosts, scores);
    } else {
        // Use a single call to the DB to retrieve all VM in the Cluster and map them by Host id
        Map<Guid, List<VM>> hostId2HaVmMapping = HaReservationHandling.mapHaVmToHostByCluster(vdsGroup.getId());

        // Count the HA VMs on each host and track the cluster-wide maximum.
        int maxCount = 0;
        for (VDS host : hosts) {

            int count = 0;
            if (hostId2HaVmMapping.containsKey(host.getId())) {
                count = hostId2HaVmMapping.get(host.getId()).size();
            }
            maxCount = (maxCount < count) ? count : maxCount;
            hostsHaVmCount.put(host.getId(), count);
        }

        // Fit count to scale of 0 to RATIO_FACTOR
        if (maxCount > 0) {
            for (VDS host : hosts) {
                int fittedCount =
                        Math.round(hostsHaVmCount.get(host.getId()).floatValue() / maxCount * RATIO_FACTOR);
                hostsHaVmCount.put(host.getId(), fittedCount);
            }
        }

        // Get scale down param
        // NOTE(review): Integer.parseInt throws NumberFormatException on a
        // malformed "ScaleDown" value - confirm it is validated upstream.
        Integer scaleDownParameter = 1;
        if (parameters.get("ScaleDown") != null) {
            scaleDownParameter = Integer.parseInt(parameters.get("ScaleDown"));
        } else {
            scaleDownParameter = Config.<Integer> getValue(ConfigValues.ScaleDownForHaReservation);
        }

        // Set the score pairs
        for (VDS host : hosts) {
            // Scale down if needed
            int haCount = hostsHaVmCount.get(host.getId());
            haCount = (int) Math.ceil(haCount / scaleDownParameter.floatValue());

            scores.add(new Pair<Guid, Integer>(host.getId(), haCount));

            log.infoFormat("Score for host:{0} is {1}", host.getName(), haCount);
        }

    }

    return scores;
}
int maxCount = 0;
for (VDS host : hosts) {

    int count = 0;
    if (hostId2HaVmMapping.containsKey(host.getId())) {
        count = hostId2HaVmMapping.get(host.getId()).size();
    }
    maxCount = (maxCount < count) ? count : maxCount;
    hostsHaVmCount.put(host.getId(), count);
}

// Fit count to scale of 0 to RATIO_FACTOR
if (maxCount > 0) {
    for (VDS host : hosts) {
        int fittedCount =
                Math.round(hostsHaVmCount.get(host.getId()).floatValue() / maxCount * RATIO_FACTOR);
        hostsHaVmCount.put(host.getId(), fittedCount);
    }
}

不同的权重模块

OptimalForEvenDistribution 权重模块的分数算法(主要应用于 Evenly_Distributed 群集策略)

new EvenDistributionWeightPolicyUnit(policyUnit);
/**
 * Scores every candidate host for even CPU-load distribution.
 *
 * @param hosts      candidate hosts; the cluster is resolved from the first entry
 * @param vm         the VM being scheduled
 * @param parameters policy-unit parameters (unused here)
 * @return one (hostId, score) pair per candidate host
 */
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // Resolve the cluster once so the threads-as-cores flag is read a single time.
    VDSGroup vdsGroup = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());
    boolean countThreadsAsCores = (vdsGroup == null) ? false : vdsGroup.getCountThreadsAsCores();

    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int hostScore = calcEvenDistributionScore(host, vm, countThreadsAsCores);
        scores.add(new Pair<Guid, Integer>(host.getId(), hostScore));
    }
    return scores;
}
return (hostCpu / vcpu) + (pendingVcpus + vm.getNumOfCpus() + spmCpu) / hostCores;
/**
 * Returns the CPU capacity of a host used by scheduling math.
 *
 * @param vds                 the host to inspect
 * @param countThreadsAsCores cluster setting: treat hyper-threads as full cores
 * @return the host's thread count when threads are counted as cores and the
 *         host reports one, otherwise its physical core count (may be null)
 */
public static Integer getEffectiveCpuCores(VDS vds, boolean countThreadsAsCores) {
    // Threads are only used when the cluster opts in AND the host reports them.
    boolean useThreads = countThreadsAsCores && vds.getCpuThreads() != null;
    return useThreads ? vds.getCpuThreads() : vds.getCpuCores();
}

None 权重模块的分数算法(主要应用于 None 群集策略)

new NoneWeightPolicyUnit(policyUnit);
@Override
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    // The "None" weight unit adds no weighting of its own; it simply
    // delegates to the base-class scoring unchanged.
    return super.score(hosts, vm, parameters);
}

OptimalForPowerSaving 权重模块的分数算法(主要应用于 Power_Saving 群集策略)

new PowerSavingWeightPolicyUnit(policyUnit);
/**
 * Power-saving score: hosts already running VMs are penalised by their
 * even-distribution score so that load concentrates on fewer hosts.
 *
 * @param hosts      candidate hosts
 * @param vm         the VM being scheduled
 * @param parameters policy-unit parameters (unused here)
 * @return one (hostId, score) pair per candidate host
 */
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    List<Pair<Guid, Integer>> scores = new ArrayList<Pair<Guid, Integer>>();
    // The cluster is fetched lazily: it is only needed for hosts that
    // already run at least one VM.
    VDSGroup cluster = null;
    for (VDS host : hosts) {
        int hostScore = MaxSchedulerWeight - 1;
        if (host.getVmCount() > 0) {
            if (cluster == null) {
                cluster = DbFacade.getInstance().getVdsGroupDao().get(hosts.get(0).getVdsGroupId());
            }
            boolean threadsAsCores = (cluster == null) ? false : cluster.getCountThreadsAsCores();
            hostScore -= calcEvenDistributionScore(host, vm, threadsAsCores);
        }
        scores.add(new Pair<Guid, Integer>(host.getId(), hostScore));
    }
    return scores;
}

OptimalForEvenGuestDistribution 权重模块的分数算法(主要应用于 VM_Evenly_Distributed 群集策略)

new EvenGuestDistributionWeightPolicyUnit(policyUnit);
/**
 * Guest-count score: each host is scored by how many VM "slots" it already
 * occupies, so hosts with fewer running guests are preferred.
 *
 * @param hosts      candidate hosts
 * @param vm         the VM being scheduled (not used by the calculation)
 * @param parameters policy-unit parameters forwarded to the slot calculation
 * @return one (hostId, score) pair per candidate host
 */
public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, String> parameters) {
    List<Pair<Guid, Integer>> result = new ArrayList<Pair<Guid, Integer>>();
    for (VDS host : hosts) {
        int hostScore = calcEvenGuestDistributionScore(host, parameters);
        result.add(new Pair<Guid, Integer>(host.getId(), hostScore));
    }
    return result;
}
/**
 * Counts the VM "slots" a host currently occupies.
 *
 * The SPM host is charged an extra grace amount, reflecting that it is
 * expected to run fewer VMs than its peers.
 *
 * @param vds        the host to evaluate
 * @param parameters may carry an "SpmVmGrace" override; otherwise the
 *                   configured default {@code spmVmGrace} is used
 * @return active VM count, plus the SPM grace when the host is the SPM
 */
private int getOccupiedVmSlots(VDS vds, Map<String, String> parameters) {
    final int spmGrace = NumberUtils.toInt(parameters.get("SpmVmGrace"), spmVmGrace);
    int slots = vds.getVmActive();
    if (vds.isSpm()) {
        slots += spmGrace;
    }
    return slots;
}

/**
 * Score for the even-guest-distribution weight unit: the host's occupied
 * VM slot count, clamped so it is never negative.
 */
private int calcEvenGuestDistributionScore(VDS vds, Map<String, String> parameters) {
    int occupied = getOccupiedVmSlots(vds, parameters);
    return occupied < 0 ? 0 : occupied;
}

负载平衡器

None 负载平衡器

new NoneBalancePolicyUnit(policyUnit);
@Override
public Pair<List<Guid>, Guid> balance(VDSGroup cluster,
        List<VDS> hosts,
        Map<String, String> parameters,
        ArrayList<String> messages) {
    // The "None" balance unit does no load balancing: a null result means
    // no migration rule is produced for this cluster.
    return null;
}

OptimalForPowerSaving 负载平衡器

只在一部分主机中分配 CPU 处理负载,这可以为其它主机节省能源消耗。如果主机上的 CPU 负载处于低使用率状态,并且处于这个状态的时间超过了预先定义的时间,这个主机上的虚拟机将被迁移到其它主机上,并在迁移后关闭这个主机。如果一个主机的利用率已经达到了设置的最高值,新加入到这个主机的虚拟机将不会被启动。

参数 说明
CpuOverCommitDurationMinutes 在群集策略起作用前,一个主机可以在 CPU 负载超过利用率中所设置的值的情况下运行的 最长 时间(以分钟为单位)。通过这个设置,可以避免因为暂时的高 CPU 负载所带来的不必要的虚拟机迁移操作,默认获取系统参数 CpuOverCommitDurationMinutes 的值,默认值是 2 分钟。
HighUtilization 最高利用率,一个百分比值,默认获取系统参数 HighUtilizationForPowerSave 的值,默认值为 10
LowUtilization 最低利用率,一个百分比值,默认获取系统参数 LowUtilizationForPowerSave 的值,默认值为 6
new PowerSavingBalancePolicyUnit(policyUnit);
/**
 * Power-saving balancing: computes the ordinary migration rule via the
 * parent unit, then evaluates whether any host should be powered up or
 * shut down and triggers that power-management action.
 *
 * @param cluster    the cluster being balanced
 * @param hosts      the schedulable hosts handed to the parent balancer
 * @param parameters policy-unit parameters
 * @param messages   collector for user-visible messages
 * @return the migration rule produced by the parent balancer (may be null)
 */
public Pair<List<Guid>, Guid> balance(VDSGroup cluster,
        List<VDS> hosts,
        Map<String, String> parameters,
        ArrayList<String> messages) {
    // Step 1: the parent unit decides the regular migration rule.
    final Pair<List<Guid>, Guid> migrationRule = super.balance(cluster, hosts, parameters, messages);

    // Step 2: classify every host of the cluster, not just the ones
    // passed in for scheduling.
    final List<VDS> clusterHosts = DbFacade.getInstance().getVdsDao().getAllForVdsGroup(cluster.getId());
    final List<VDS> emptyHosts = new ArrayList<>();
    final List<VDS> maintenanceHosts = new ArrayList<>();
    final List<VDS> downHosts = new ArrayList<>();
    getHostLists(clusterHosts, parameters, emptyHosts, maintenanceHosts, downHosts);

    // Step 3: decide on a power-management action and apply it, if any.
    final Pair<VDS, VDSStatus> pmAction = evaluatePowerManagementSituation(
            cluster,
            downHosts,
            maintenanceHosts,
            emptyHosts,
            parameters);
    if (pmAction != null) {
        processPmAction(pmAction);
    }

    return migrationRule;
}

OptimalForEvenDistribution 负载平衡器

new EvenDistributionBalancePolicyUnit(policyUnit);

OptimalForEvenGuestDistribution 负载平衡器

new EvenGuestDistributionBalancePolicyUnit(policyUnit);
参数 说明
SpmVmGrace 该变量定义了 SPM 主机究竟比其他主机运行的虚拟机少多少,默认获取系统参数 SpmVmGraceForEvenGuestDistribute 的值,默认值为 5
MigrationThreshold 在虚拟机从主机迁移之前,定义一个缓冲区。它最大限度的包容了最高利用率主机和最低利用率主机在虚拟机数量上的不同。当在群集中的每台主机都有一个虚拟机计数在值内时,该群集是均衡的,默认系统参数 MigrationThresholdForEvenGuestDistribute 的值,默认值为:5
HighVmCount 设置可在每台服务器运行虚拟机的最大数目。超过此限制限定服务器重载,默认系统参数 HighVmCountForEvenGuestDistribute 的值,默认值为:10
上一篇 下一篇

猜你喜欢

热点阅读