testpage

Code Example

#include <stdio.h>
int main()
{
   // printf() prints the string between the quotation marks
   printf("Hello, World!\n");
   return 0;
}
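
Assuming a C compiler such as gcc is available and the snippet is saved as hello.c (both the compiler and the file name are assumptions, not part of the page), it can be built with gcc -o hello hello.c and run with ./hello, which prints Hello, World!.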

Python Example

#!/usr/bin/env python3
import sys
import signal
import time
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch.optim as optim

import torchvision
from torchvision import datasets, transforms, models

import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter, ScalarFormatter
import warnings
from matplotlib import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)

from io import open
import os
from os import path
from PIL import Image
import json

# ANSI escape codes for coloured terminal output
CBLACK  = '\33[30m'
CRED    = '\33[31m'
CGREEN  = '\33[32m'
CYELLOW = '\33[33m'
CBLUE   = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE  = '\33[36m'
CWHITE  = '\33[37m'
CEND    = '\033[0m'

ap = argparse.ArgumentParser(description='PyTorch ResNet training with a simple web dashboard.')
ap.add_argument('-v', '--version', action='version', version='%(prog)s 0.1b')
ap.add_argument('-t', '--train', action="store_true", default=False,help='Runs Training, CTRL+C to Exit.')
ap.add_argument('-s', '--stats', action="store_true", default=False,help='Outputs Plots and JSON')
ap.add_argument('-i', '--info', action="store_true", default=False,help='Outputs Model Information to CLI')
#ap.add_argument('-f', '--file', 'filename', nargs='?',help='Evaluates an Image against the Model')
ap.add_argument('-c', '--clear', default=['0'], nargs="*" ,help='Clears State Data Usage: all,hist,opt')
a = ap.parse_args()
statsarg = a.stats
clear = a.clear
training = a.train
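
# Example invocations (train.py is a placeholder name for this script, not part of the original):
#   python train.py --train               # train until CTRL+C
#   python train.py --stats               # regenerate plots, stats.json and the dashboard, then exit
#   python train.py --train --clear hist  # reset the stored training history
#   python train.py                       # evaluate the saved model on the test set and ./testimages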

# SIGINT (CTRL+C) handler: set a flag that the training/evaluation loops poll,
# so the script can notice the interrupt at a batch boundary and exit cleanly.
def signal_handler(sig, frame):
    global interrupted
    interrupted = True
signal.signal(signal.SIGINT, signal_handler)
interrupted = False

net = models.resnet18(pretrained = False)
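# Note: resnet18 is created with its default 1000-way classification head; for the
# 10-class CIFAR-10 data loaded below one would typically replace net.fc with a
# 10-output linear layer, though training runs either way.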

modelsavelocation = './models/Res.pth'
dashboardtemplatehead = './web/templates/head.html'
dashboardtemplatebody = './web/templates/body.html'
dashboardindex = './web/index.html'

batch_size = 16

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(CRED+'\n  @'+CGREEN+'}-,-`- '+CEND+' Pytorch Model and Dashboard\n')
print('Cuda Version: '+CYELLOW+'{}'.format(torch.version.cuda)+CEND)
print('Using Cuda: '+CRED+'{}\n'.format(torch.cuda.is_available())+CEND)

transform_train = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

transform_test = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224), 
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
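# The mean/std values above are the standard ImageNet statistics; CIFAR-10 (loaded
# below) has slightly different statistics, so this normalisation is approximate.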
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

cwd=os.getcwd()
plt.style.use('dark_background')

def img(img, location):
    """Save a tensor image grid to web/current/<location>.png."""
    plt.figure()
    plt.rc('font', family='Ubuntu')
    img = img / 2 + 0.5     # roughly undo the normalisation for display
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.savefig("web/current/{}.png".format(os.path.splitext(location)[0]), bbox_inches="tight", pad_inches=0, transparent=True)
    plt.close()

def imgp(img, title, location):
    """Same as img(), but draws a title above the image."""
    plt.figure()
    plt.rc('font', family='Ubuntu')
    img = img / 2 + 0.5     # roughly undo the normalisation for display
    npimg = img.numpy()
    plt.title(title, fontsize=18)
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.savefig("web/current/{}.png".format(os.path.splitext(location)[0]), bbox_inches="tight", pad_inches=0, transparent=True)
    plt.close()

def plot(data,title,colour,location):
    plt.figure()
    plt.rc('font',family='Ubuntu')
    plt.title(title, fontsize=16)
    ax = plt.subplot(111)
    if title == "Bad Epochs":
        ax.plot(data, linewidth=1, color=colour,drawstyle='steps-pre')
    else:
        ax.plot(data, linewidth=1, color=colour)
    ax.margins(0)
    ax.grid(False)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
    plt.savefig("web/current/{}.png".format(location), bbox_inches="tight", pad_inches=0, transparent=True)
    plt.close()

 
net = net.to(device)

#optim.SGD(net.parameters(), lr=0.1)
optimizer = optim.Adam(net.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
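# Note: lr=0.1 is an unusually high starting rate for Adam (1e-3 is more typical);
# the StepLR scheduler divides it by 10 every 30 epochs.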

epoch = 0
epoch_acc = 0.0
epoch_loss = 0.0
best_acc = 0.0
bad_epochs = 0
epoch_lr = 0.0
val_acc_history = []
val_loss_history = []
val_lr_history = []
val_save_history = []

def load_model():
    global clear
    global epoch
    global best_acc
    global val_acc_history
    global val_loss_history
    global val_lr_history
    global val_save_history
    if os.path.isfile(modelsavelocation):
        if 'all' in clear:
            if input(CRED + "Are you sure you want to Delete your Model?" + CEND + " (y/n)") == "y":
                print('Deleting Model -_-;')
                os.remove(modelsavelocation)
                sys.exit()
            else:
                print('Not Deleting ^_^')
                sys.exit()
        print('Loading Model... ', end='')
        loadmodel = torch.load(modelsavelocation)
        net.load_state_dict(loadmodel['net'])
        best_acc = loadmodel['best_acc']
        if 'opt' not in clear:
            print('Optimizer and Scheduler... ', end='')
            scheduler.load_state_dict(loadmodel['scheduler'])
            optimizer.load_state_dict(loadmodel['optimizer'])
        if 'hist' not in clear:
            print('History... ', end='')
            epoch = loadmodel['epoch']
            val_acc_history = loadmodel['val_acc_history']
            val_loss_history = loadmodel['val_loss_history']
            val_lr_history = loadmodel['val_lr_history']
            val_save_history = loadmodel['val_save_history']
        print('Complete\n')
    else:
        print('No Saved Model... Creating')
        save_model()



def save_model():
    print('Saving Model')
    state = {
        'net': net.state_dict(),
        'best_acc': best_acc,
        'epoch': epoch,
        'scheduler' : scheduler.state_dict(),
        'optimizer' : optimizer.state_dict(),
        'val_acc_history': val_acc_history,
        'val_loss_history': val_loss_history,
        'val_lr_history': val_lr_history,
        'val_save_history': val_save_history,
    }
    torch.save(state, modelsavelocation)

def dashboard():
    global epoch
    global best_acc
    global epoch_lr
    global epoch_loss
    global epoch_acc
    global bad_epochs
    with open(dashboardtemplatehead) as f:
        templatehead = f.read()
    with open(dashboardtemplatebody) as f:
        templatebody = f.read().format(vepoch=epoch, vepoch_acc=epoch_acc, vbest_acc=best_acc,
                                       vepoch_loss=epoch_loss, vepoch_lr=epoch_lr, vbad_epochs=bad_epochs)

    with open(dashboardindex, "w") as index:
        index.write(templatehead)
        index.write(templatebody)

def stats():
    plot(val_acc_history,"Accuracy","orangered","plot1")
    plot(val_loss_history,"Loss","deepskyblue","plot2")
    plot(val_lr_history,"Learning Rate","yellowgreen","plot3")
    plot(val_save_history,"Bad Epochs","orchid","plot4")

    statsdict = {
        'best_acc': str(float(best_acc)),
        'epoch': str(epoch),
    }
    with open("web/stats.json", "w") as f:
        json.dump(statsdict, f)
    dashboard()

load_model()

#Replot for ease of use
stats()
if statsarg:
    print('Exported Stats, Exiting.')
    sys.exit()

#if device == 'cuda':
#    net.features = torch.nn.DataParallel(net.features)
#    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()


if training:
    net.train()
    since = time.time()
    print('Starting @ Epoch '+CYELLOW+'{}'.format(epoch)+CEND+' with an Accuracy of '+CRED+'{:.5f}\n'.format(best_acc)+CEND)
    
    while True:
        # If any --clear option was given, persist the cleared state and stop.
        if clear != ['0']:
            print('Model Cleared of History')
            save_model()
            break
        running_loss = 0
        running_corrects = 0

        for i, (inputs, targets) in enumerate(trainloader):

            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            _, preds = torch.max(outputs, 1)

            running_loss += loss.item() * inputs.size(0)
            corrects = torch.sum(preds == targets.data)
            running_corrects += corrects
            #if i % 1001 == 1000:
            #    time_i = time.time() - since
            #    print('[{}] {:.0f}m {:.0f}s'.format(i,time_i // 60, time_i % 60))
            
            if interrupted:
                time_elapsed = time.time() - since
                print('\n\nTrained Model for {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
                print('Best Accuracy: {:.5f}'.format(best_acc))
                print("Exiting Training\n")
                sys.exit()

        scheduler.step()
        epoch += 1
        epoch_loss = running_loss / len(trainloader.dataset)
        epoch_acc = (running_corrects.double() / len(trainloader.dataset)).item()

        for param_group in optimizer.param_groups:
            val_lr_history.append(param_group['lr'])
            epoch_lr=(param_group['lr'])
        val_acc_history.append(epoch_acc)
        val_loss_history.append(epoch_loss)

        if epoch_acc > best_acc:
            best_acc = epoch_acc
            bad_epochs = 0
            val_save_history.append(bad_epochs)
            save_model()
        else:
            bad_epochs += 1
            val_save_history.append(bad_epochs)


            
        #CLI Display
        print('Epoch:'+CYELLOW+'{}'.format(epoch)+CEND+' Acc:'+CRED+'{:.5f}'.format(epoch_acc)+CEND+'/'+CRED+'{:.5f}'.format(best_acc)+CEND+' Loss:'+CBLUE+'{:.2f}'.format(epoch_loss)+CEND+' Lr:'+CGREEN+'{:.5}'.format(epoch_lr)+CEND+' Bad:'+CVIOLET+'{}'.format(bad_epochs)+CEND+'\n')
        

        
        #Plot and Output JSON
        stats()
     
if not training:
    net.eval()

    # for i, (inputs, labels) in enumerate(testloader):
    #     inputs = inputs.to(device)
    #     labels = labels.to(device)
    # # print images
    #     img(torchvision.utils.make_grid(inputs.cpu()),'grid')
    #     print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
    #     print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
    #                             for j in range(4)))
    #     if i == 0:
    #         break


    correct = 0
    total = 0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(testloader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = net(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            if interrupted:
                print("Exiting Evaluation\n")
                sys.exit()

    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))


    # class_correct = list(0. for i in range(10))
    # class_total = list(0. for i in range(10))
    # with torch.no_grad():
    #     for i, (inputs, labels) in enumerate(testloader):
    #         inputs = inputs.to(device)
    #         labels = labels.to(device)
    #         outputs = net(inputs)
    #         _, predicted = torch.max(outputs, 1)
    #         c = (predicted == labels).squeeze()
    #         for i in range(4):
    #             label = labels[i]
    #             class_correct[label] += c[i].item()
    #             class_total[label] += 1


    # for i in range(10):
    #     print('Accuracy of %5s : %2d %%' % (
    #         classes[i], 100 * class_correct[i] / class_total[i]))
    print("User Image Test:")

    def predict_image(image_path):
        image = Image.open(image_path).convert('RGB')

        transformation = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # Add a batch dimension and move the image to the same device as the model.
        image_tensor = transformation(image).float().unsqueeze(0)

        with torch.no_grad():
            inputs = image_tensor.to(device)
            output = net(inputs)
            index = output.argmax(dim=1).item()

        print('{} predicted as {}'.format(imagefile, classes[index]))
        imgp(torchvision.utils.make_grid(inputs.cpu()), '{} predicted as {}'.format(imagefile, classes[index]), imagefile)

        return index


    if __name__ == "__main__":
        directory = os.path.join(os.getcwd(), 'testimages/')
        for filename in os.listdir(directory):
            # Skip anything that is not a .png or .jpg image.
            if not (filename.endswith(".png") or filename.endswith(".jpg")):
                continue
            imagefile = filename
            imagepath = os.path.join(directory, imagefile)
            index = predict_image(imagepath)
            if interrupted:
                print("Exiting Evaluation\n")
                sys.exit()
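
A note on the expected directory layout: as written, the script expects the CIFAR-10 data to already be present under ./data (both loaders use download=False), a ./models directory for the Res.pth checkpoint, ./web/templates/head.html and body.html plus a ./web/current directory for the dashboard and plot output, and a ./testimages directory of .png or .jpg files for the user-image test.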
