How do you print accuracy scores in Python?

Use the grid_scores_ attribute (deprecated in scikit-learn 0.18 and removed in 0.20, where cv_results_ replaces it; see the sketch after the example below):

>>> clf = GridSearchCV(LogisticRegression(), {'C': [1, 2, 3]})
>>> clf.fit(np.random.randn(10, 4), np.random.randint(0, 2, 10))
GridSearchCV(cv=None,
       estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001),
       fit_params={}, iid=True, loss_func=None, n_jobs=1,
       param_grid={'C': [1, 2, 3]}, pre_dispatch='2*n_jobs', refit=True,
       score_func=None, scoring=None, verbose=0)
>>> from pprint import pprint
>>> pprint(clf.grid_scores_)
[mean: 0.40000, std: 0.11785, params: {'C': 1},
 mean: 0.40000, std: 0.11785, params: {'C': 2},
 mean: 0.40000, std: 0.11785, params: {'C': 3}]
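On scikit-learn 0.20 and later, grid_scores_ no longer exists and the same information is exposed through cv_results_. A minimal sketch of the equivalent printout on a current version (random data again, so the actual numbers will differ):

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
import numpy as np

clf = GridSearchCV(LogisticRegression(), {'C': [1, 2, 3]})
clf.fit(np.random.randn(100, 4), np.random.randint(0, 2, 100))

# cv_results_ is a dict of parallel arrays; zip the columns of interest.
for mean, std, params in zip(clf.cv_results_['mean_test_score'],
                             clf.cv_results_['std_test_score'],
                             clf.cv_results_['params']):
    print("mean: {:.5f}, std: {:.5f}, params: {}".format(mean, std, params))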

sklearn.metrics.accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None)

Accuracy classification score.

In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true.

Read more in the User Guide.

Parameters:

y_true : 1d array-like, or label indicator array / sparse matrix

Ground truth (correct) labels.

y_pred : 1d array-like, or label indicator array / sparse matrix

Predicted labels, as returned by a classifier.

normalize : bool, default=True

If False, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples.

sample_weight : array-like of shape (n_samples,), default=None

Sample weights.

Returns:

score : float

If normalize == True, return the fraction of correctly classified samples (float), else returns the number of correctly classified samples (int).

The best performance is 1 with normalize == True and the number of samples with normalize == False.

See also

balanced_accuracy_score

Compute the balanced accuracy to deal with imbalanced datasets.

jaccard_score

Compute the Jaccard similarity coefficient score.

hamming_loss

Compute the average Hamming loss or Hamming distance between two sets of samples.

zero_one_loss

Compute the Zero-one classification loss. By default, the function will return the percentage of imperfectly predicted subsets.

Notes

In binary classification, this function is equal to the jaccard_score function.

Examples

>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2

In the multilabel case with binary label indicators:

>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
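
The sample_weight parameter weights each sample's contribution to the score; a short illustration with made-up labels (not part of the official examples above):

>>> accuracy_score([0, 1, 1, 0], [0, 1, 0, 0], sample_weight=[1, 1, 2, 1])
0.6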

Examples using sklearn.metrics.accuracy_score

38 Python code examples related to "print scores", collected from open-source projects, are shown below.

Example 1

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.6f}".format(
            val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.6f}".format(
            val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)


# Print intersection-over-union scores for all categories.

Example 2

def print_scores_table(scores, prefix="TARGET"):
    prefix_path, scores["File"] = _extract_path_prefix(scores["File"])
    path_str = " ({})".format(prefix_path) if prefix_path else ""

    max_method_length = max(len(path_str) + 4, max(map(len, scores["File"])))
    print("-" * (max_method_length + 13 * 3))
    print("Word-level scores for {}:".format(prefix))
    print(
        "{:{width}}    {:9}    {:9}    {:9}".format(
            "File{}".format(path_str),
            "F1_mult",
            "F1_{}".format(const.LABELS[0]),
            "F1_{}".format(const.LABELS[1]),
            width=max_method_length,
        )
    )
    for score in np.sort(scores, order=["F1_mult", "File"])[::-1]:
        print(
            "{:{width}s}    {:<9.5f}    {:<9.5}    {:<9.5f}".format(
                *score, width=max_method_length
            )
        ) 

Example 3

def print_evaluation_scores(json_file):
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    if True:#config.CATEGORY_AGNOSTIC:
        print("Using category agnostic evaluation mode")
        cocoEval.params.useCats = 0
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        if True: #config.CATEGORY_AGNOSTIC:
            print("Using category agnostic evaluation mode")
            cocoEval.params.useCats = 0
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize() 

Example 4

def print_scores(self, show_miou=True):
        for key, rs in self.running_scores.items():
            Log.info('Result for {}'.format(key))
            if isinstance(rs, fscore_rslib.F1RunningScore):
                FScore, FScore_cls = rs.get_scores()
                Log.info('Mean FScore: {}'.format(FScore))
                Log.info(
                    'Class-wise FScore: {}'.format(
                        ', '.join(
                            '{:.3f}'.format(x)
                            for x in FScore_cls
                        )
                    )
                )
            elif isinstance(rs, rslib.SimpleCounterRunningScore):
                Log.info('ACC: {}\n'.format(rs.get_mean_acc()))
            else:
                if show_miou and hasattr(rs, 'get_mean_iou'):
                    Log.info('Mean IOU: {}\n'.format(rs.get_mean_iou()))
                Log.info('Pixel ACC: {}\n'.format(rs.get_pixel_acc()))

                if hasattr(rs, 'n_classes') and rs.n_classes == 2:
                    Log.info(
                        'F1 Score: {} Precision: {} Recall: {}\n'
                        .format(*rs.get_F1_score())
                    ) 

Example 5

def print_current_scores(self, epoch, iters, scores):
        """print current losses on console; also save the losses to the disk

        Parameters:
            epoch (int) -- current epoch
            iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
            scores (OrderedDict) -- training losses stored in the format of (name, float) pairs
        """
        message = '(epoch: %d, giters: %d) ' % (epoch, iters)
        for k, v in scores.items():
            message += '%s: %.3f ' % (k, v)

        print(message)  # print the message
        with open(self.score_log_name, "a") as log_file:
            log_file.write('%s\n' % message)  # save the message 

Example 6

def print_scores(self, metric_types=None):
        if metric_types is None:
            metric_types = self.metric_types

        scores = [np.mean([self.scores[m][trial] for trial in self.trials]) for m in metric_types]
        txt = "All: " + " ".join(["{}:{:.04}".format(metric_types[i], scores[i]) for i in range(len(metric_types))])
        print(txt) 

Example 7

def print_df_scores(df_scores, indent=''):
    """Pretty print the scores dataframe.

    Parameters
    ----------
    df_scores : pd.DataFrame
        the score dataframe
    indent : str, default=''
        indentation if needed
    """
    with option_context("display.width", 160):
        df_repr = repr(df_scores)
    df_repr_out = []
    for line, color_key in zip(df_repr.splitlines(),
                               [None, None] +
                               list(df_scores.index.values)):
        if line.strip() == 'step':
            continue
        if color_key is None:
            # table header
            if IS_COLOR_TERM:
                line = stylize(line, fg(fg_colors['title']) + attr('bold'))
        if color_key is not None:
            tokens = line.split()
            tokens_bak = tokens[:]
            if 'official_' + color_key in fg_colors:
                # line label and official score bold & bright
                if IS_COLOR_TERM:
                    label_color = fg(fg_colors['official_' + color_key])
                    tokens[0] = stylize(tokens[0], label_color + attr('bold'))
                    tokens[1] = stylize(tokens[1], label_color + attr('bold'))
            if IS_COLOR_TERM and (color_key in fg_colors):
                # other scores pale
                tokens[2:] = [stylize(token, fg(fg_colors[color_key]))
                              for token in tokens[2:]]
            for token_from, token_to in zip(tokens_bak, tokens):
                line = line.replace(token_from, token_to)
        line = indent + line
        df_repr_out.append(line)
    print('\n'.join(df_repr_out)) 

Example 8

def print_loci_scores(self):

        """Wrapper method to create a csv.DictWriter instance and call
        the Locus.print_scores method on it."""

        self.define_loci()

        # self.available_monolocus_metrics = set(self.monoholder.available_metrics)
        if len(self.loci) == 0:
            return
        for locus in self.loci:
            for row in self.loci[locus].print_scores():
                yield row 

Example 9

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 10

def print_scores(scores, cg_names, ref_cg):
    if ref_cg is None:
        for cg_name in cg_names:
            bleu, iter_ = max(scores[cg_name], key=lambda item: item[0])
            print('Best score for [{}]: {} at iter {}'.format(cg_name, bleu, iter_))
    else:
        bleu, iter_ = max(scores[ref_cg], key=lambda item: item[0])
        print('Best score for [{}]: {} at iter {}'.format(ref_cg, bleu, iter_))
        for cg_name in cg_names:
            if cg_name != ref_cg:
                bleu = [bb for bb, tt in scores[cg_name] if tt == iter_][0]
                print('Best score for [{}]: {} at iter {}'.format(cg_name, bleu, iter_))

Example 11

def print_scores(top_lessons, titles=False):
    for lesson in top_lessons:
        lesson_id = lesson[0]
        num_recs = lesson[2] if titles else lesson[1]

        if titles:
            title = lesson[1]
            out_str = 'Course-ID: {0}, title: {1}, #recs: {2}'.format(lesson_id, title, num_recs)
        else:
            out_str = 'Course-ID: {0}, #recs: {1}'.format(lesson_id, num_recs)

        print(out_str) 

Example 12

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 13

def print_scores(self, pred, k=10, only_first_name=True):
        """
        Print the scores (or probabilities) for the top-k predicted classes.

        :param pred:
            Predicted class-labels returned from the predict() function.

        :param k:
            How many classes to print.

        :param only_first_name:
            Some class-names are lists of names, if you only want the first name,
            then set only_first_name=True.

        :return:
            Nothing.
        """

        # Get a sorted index for the pred-array.
        idx = pred.argsort()

        # The index is sorted lowest-to-highest values. Take the last k.
        top_k = idx[-k:]

        # Iterate the top-k classes in reversed order (i.e. highest first).
        for cls in reversed(top_k):
            # Lookup the class-name.
            name = self.name_lookup.cls_to_name(cls=cls, only_first_name=only_first_name)

            # Predicted score (or probability) for this class.
            score = pred[cls]

            # Print the score and class-name.
            print("{0:>6.2%} : {1}".format(score, name)) 

Example 14

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 15

def print_columns_with_scores(self):
        mode = self.mode
        self.set_fields_mode()
        seen_nid = dict()
        for el, score in self._chosen_rank:
            if el not in seen_nid:
                print(str(el) + " -> " + str(score))
            seen_nid[el] = 0
        self._mode = mode 

Example 16

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 17

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 18

def print_scores(scores, etype):
    levels = ['easy', 'medium', 'hard', 'extra', 'all']
    partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
                     'group', 'order', 'and/or', 'IUEN', 'keywords']

    print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
    counts = [scores[level]['count'] for level in levels]
    print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))

    if etype in ["all", "exec"]:
        print('=====================   EXECUTION ACCURACY     =====================')
        this_scores = [scores[level]['exec'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))

    if etype in ["all", "match"]:
        print('\n====================== EXACT MATCHING ACCURACY =====================')
        exact_scores = [scores[level]['exact'] for level in levels]
        print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
        print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))

        print('---------------------- PARTIAL MATCHING RECALL ----------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))

        print('---------------------- PARTIAL MATCHING F1 --------------------------')
        for type_ in partial_types:
            this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
            print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores)) 

Example 19

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 20

def print_scores(self):
        """This method yields dictionary rows that are given to a csv.DictWriter class."""
        self.calculate_scores()
        if self.regressor is None:
            score_keys = sorted(list(self.json_conf["scoring"].keys()) + ["source_score"])
        else:
            score_keys = sorted(self.regressor.metrics + ["source_score"])
        keys = ["tid", "alias", "parent", "score"] + score_keys

        for tid in self.scores:
            row = dict().fromkeys(keys)
            row["tid"] = tid
            row["parent"] = self.id
            row["alias"] = self.transcripts[tid].alias
            if tid in self._not_passing:
                row["score"] = 0
            else:
                row["score"] = round(self.scores[tid]["score"], 2)
            calculate_total = (self.regressor is None)
            for key in score_keys:
                if calculate_total:
                    assert self.scores[tid][key] != "NA" and self.scores[tid][key] is not None
                    row[key] = round(self.scores[tid][key], 2)

            if calculate_total is True:
                score_sum = sum(row[key] for key in score_keys)
                if tid not in self._not_passing and self.scores[tid]["score"] > 0:
                    assert round(score_sum, 2) == round(self.scores[tid]["score"], 2), (
                        score_sum,
                        self.transcripts[tid].score,
                        tid)
                else:
                    assert self.scores[tid]["score"] == 0

            yield row 

Example 21

def print_scores(self, pred, k=10, only_first_name=True):
        """
        Print the scores (or probabilities) for the top-k predicted classes.

        :param pred:
            Predicted class-labels returned from the predict() function.

        :param k:
            How many classes to print.

        :param only_first_name:
            Some class-names are lists of names, if you only want the first name,
            then set only_first_name=True.

        :return:
            Nothing.
        """

        vals = []

        # Get a sorted index for the pred-array.
        idx = pred.argsort()

        # The index is sorted lowest-to-highest values. Take the last k.
        top_k = idx[-k:]

        # Iterate the top-k classes in reversed order (i.e. highest first).
        for cls in reversed(top_k):
            # Lookup the class-name.
            name = self.name_lookup.cls_to_name(cls=cls, only_first_name=only_first_name)

            # Predicted score (or probability) for this class.
            score = pred[cls]
            vals.append({"score": float(score), "name": name})
            # Print the score and class-name.
            # print("{0:>6.2%} : {1}".format(score, name))
        return vals 

Example 22

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 23

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 24

def print_scores(self, metric_types=None):
        if metric_types is None:
            metric_types = self.metric_types

        scores = [np.mean([self.scores[m][trial] for trial in self.trials]) for m in metric_types]
        txt = "All: " + " ".join(["{}:{:.04}".format(metric_types[i], scores[i]) for i in range(len(metric_types))])
        print(txt) 

Example 25

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 26

def print_cricket_scores():
    print "=================CRICKET SCORES=================================="
    try:
        parsed_scores = _fetch_and_parse()
        for i, score in enumerate(parsed_scores):
            print beautify(i+1, score)
    except:
        print beautify(0, 'No cricket scores to display.')
    print "***************************************************************" 

Example 27

def print_subloci_scores(self):
        """Wrapper method to create a csv.DictWriter instance and call the
        sublocus.print_metrics method
        on it for each sublocus."""

        # self.get_sublocus_metrics()

        for slocus in self.subloci:
            for row in slocus.print_scores():
                yield row 

Example 28

def print_monoholder_scores(self):

        """Wrapper method to create a csv.DictWriter instance and call
        the MonosublocusHolder.print_scores method on it."""

        self.define_monosubloci()

        # self.available_monolocus_metrics = set(self.monoholder.available_metrics)
        if len(self.monoholders) == 0:
            return
        for monoholder in self.monoholders:
            for row in monoholder.print_scores():
                yield row 

Example 29

def print_scores(self, pred, k=10, only_first_name=True):
        """
        Print the scores (or probabilities) for the top-k predicted classes.

        :param pred:
            Predicted class-labels returned from the predict() function.

        :param k:
            How many classes to print.

        :param only_first_name:
            Some class-names are lists of names, if you only want the first name,
            then set only_first_name=True.

        :return:
            Nothing.
        """

        # Get a sorted index for the pred-array.
        idx = pred.argsort()

        # The index is sorted lowest-to-highest values. Take the last k.
        top_k = idx[-k:]

        # Iterate the top-k classes in reversed order (i.e. highest first).
        for cls in reversed(top_k):
            # Lookup the class-name.
            name = self.name_lookup.cls_to_name(cls=cls, only_first_name=only_first_name)

            # Predicted score (or probability) for this class.
            score = pred[cls]

            # Print the score and class-name.
            print("{0:>6.2%} : {1}".format(score, name)) 

Example 30

def print_scores(self, pred, k=10, only_first_name=True):
        """
        Print the scores (or probabilities) for the top-k predicted classes.

        :param pred:
            Predicted class-labels returned from the predict() function.

        :param k:
            How many classes to print.

        :param only_first_name:
            Some class-names are lists of names, if you only want the first name,
            then set only_first_name=True.

        :return:
            Nothing.
        """

        # Get a sorted index for the pred-array.
        idx = pred.argsort()

        # The index is sorted lowest-to-highest values. Take the last k.
        top_k = idx[-k:]

        # Iterate the top-k classes in reversed order (i.e. highest first).
        for cls in reversed(top_k):
            # Lookup the class-name.
            name = self.name_lookup.cls_to_name(cls=cls, only_first_name=only_first_name)

            # Predicted score (or probability) for this class.
            score = pred[cls]

            # Print the score and class-name.
            print("{0:>6.2%} : {1}".format(score, name)) 

Example 31

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 32

def print_performance_scores(performance_scores, title=None):
        """Prints an ASCII table of performance scores.

        Args:
            performance_scores: A dictionary of label, score pairs where label is a class tag and
                scores is a 4-tuple containing precision, recall, f1 and support
            title (str): The title of the table (uppercased).

        Preconditions:
            Assumes the values of performance_scores are 4-tuples, where the first three items are
            float representations of a percentage and the last item is an integer count.
        """
        # create table, give it a title a column names
        table = PrettyTable()
        if title is not None:
            table.title = title.upper()
        table.field_names = ['Label', 'Precision', 'Recall', 'F1', 'Support']
        # column alignment
        table.align['Label'] = 'l'
        table.align['Precision'] = 'r'
        table.align['Recall'] = 'r'
        table.align['F1'] = 'r'
        table.align['Support'] = 'r'
        # create and add the rows
        for label, scores in performance_scores.items():
            row = [label]
            # convert scores to formatted percentage strings
            support = scores[-1]
            performance_metrics = ['{:.2%}'.format(x) for x in scores[:-1]]
            row_scores = performance_metrics + [support]

            row.extend(row_scores)
            table.add_row(row)

        print(table) 

Example 33

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 34

def printClassScoresPytorch(scoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        #niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr )#+ "    " + niouStr)

# Print intersection-over-union scores for all classes. 

Example 35

def printCategoryScores(scoreDict, instScoreDict, args):
    if (args.quiet):
        return
    print(args.bold + "categories       IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for categoryName in scoreDict:
        if all( label.ignoreInEval for label in category2labels[categoryName] ):
            continue
        iouStr  = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
        niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
        print("{:<14}: ".format(categoryName) + iouStr + "    " + niouStr)

# Evaluate image lists pairwise. 

Example 36

def printClassScores(scoreList, instScoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (id2label[label].ignoreInEval):
            continue
        labelName = str(id2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr + "    " + niouStr)

# Print intersection-over-union scores for all categories.

Example 37

def printClassScoresPytorchTrain(scoreList, args):
    if (args.quiet):
        return
    print(args.bold + "classes          IoU      nIoU" + args.nocol)
    print("--------------------------------")
    for label in args.evalLabels:
        if (trainId2label[label].ignoreInEval):
            continue
        labelName = str(trainId2label[label].name)
        iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
        #niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
        print("{:<14}: ".format(labelName) + iouStr )#+ "    " + niouStr) 

Example 38

def print_tables_with_scores(self):

        def aggr_certainty_table(group_by_table):
            ranked_list = []
            # First aggregate bitsets
            for x, score in self._chosen_rank:
                new_score = score
                if x.source_name in group_by_table:
                    old_score = group_by_table[x.source_name]
                    new_score = old_score + new_score
                group_by_table[x.source_name] = new_score
            for table, score_value in group_by_table.items():
                value = (table, score_value)
                ranked_list.append(value)
            ranked_list = sorted(ranked_list, key=lambda a: a[1], reverse=True)
            return ranked_list

        def aggr_coverage_table(group_by_table):
            ranked_list = []
            # First aggregate bitsets
            for x, score in self._chosen_rank:
                coverage_score, new_coverage_set = score
                if x.source_name in group_by_table:
                    old_coverage_set = group_by_table[x.source_name]
                    new_coverage_set = new_coverage_set | old_coverage_set
                group_by_table[x.source_name] = new_coverage_set
            for table, bitset in group_by_table.items():
                new_score = bitset.count() / bitset.length()
                value = (table, new_score)
                ranked_list.append(value)
            ranked_list = sorted(ranked_list, key=lambda a: a[1], reverse=True)
            return ranked_list

        mode = self.mode  # save state
        self.set_fields_mode()
        group_by_table = dict()
        if self._ranking_criteria == self.RankingCriteria.CERTAINTY:
            ranked_list = aggr_certainty_table(group_by_table)
        elif self._ranking_criteria == self.RankingCriteria.COVERAGE:
            ranked_list = aggr_coverage_table(group_by_table)

        for x in ranked_list:
            print(x)
        self._mode = mode  # recover state 

How do you find the accuracy score in Python?

How to calculate balanced accuracy in Python using sklearn:

Balanced accuracy = (Sensitivity + Specificity) / 2
Balanced accuracy = (0.75 + 0.9868) / 2
Balanced accuracy = 0.8684
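
Rather than computing it by hand, scikit-learn's balanced_accuracy_score does the same averaging of per-class recall. A minimal sketch with invented labels (not taken from the numbers above):

from sklearn.metrics import balanced_accuracy_score

y_true = [0, 0, 0, 0, 1, 1]
y_pred = [0, 0, 0, 1, 1, 1]
# recall on class 1 (sensitivity) = 1.0, recall on class 0 (specificity) = 0.75
print(balanced_accuracy_score(y_true, y_pred))  # 0.875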

How do you find accuracy score?

Accuracy is a metric used in classification problems to express the percentage of correct predictions. We calculate it by dividing the number of correct predictions by the total number of predictions.
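
A direct translation of that definition, without any library (the labels here are arbitrary examples):

y_true = [0, 1, 2, 3]
y_pred = [0, 1, 2, 1]
# Count matching positions and divide by the total number of predictions.
correct = sum(t == p for t, p in zip(y_true, y_pred))
print(correct / len(y_true))  # 0.75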

What does score () do in Python?

Assuming it's scikit-learn in Python, model.score automates prediction on X_test and compares the result with Y_test; for regressors it uses the R-squared metric by default (so you don't need to derive y_pred manually), while for classifiers it returns mean accuracy.
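
A small sketch showing the equivalence for a regressor (the data below is randomly generated, purely for illustration):

from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import numpy as np

X = np.random.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(50)
model = LinearRegression().fit(X, y)

# Both lines print the same number: R^2 on the given data.
print(model.score(X, y))
print(r2_score(y, model.predict(X)))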

How does Python calculate accuracy and precision?

# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(testy, yhat_classes)
print('Accuracy: %f' % accuracy)
# precision: tp / (tp + fp)
precision = precision_score(testy, yhat_classes)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
...
print('Recall: %f' % recall)
...
f1 = f1_score(testy, yhat_classes)
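
The snippet above elides a couple of lines; a self-contained version of the same idea, with the recall and F1 lines filled in the obvious way and invented labels standing in for testy and yhat_classes:

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

testy = [0, 1, 1, 0, 1, 0, 1, 0]          # ground-truth labels (invented)
yhat_classes = [0, 1, 0, 0, 1, 1, 1, 0]   # predicted labels (invented)

# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(testy, yhat_classes)
print('Accuracy: %f' % accuracy)
# precision: tp / (tp + fp)
precision = precision_score(testy, yhat_classes)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(testy, yhat_classes)
print('Recall: %f' % recall)
# f1: 2 * precision * recall / (precision + recall)
f1 = f1_score(testy, yhat_classes)
print('F1: %f' % f1)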