Dragon1, 2020-11-27 20:26:53

Why is there a difference in execution time between these loops?

The code:

#include <stdio.h>
#include <time.h>

// Size of data
long Size;
char c1;
char c2;
long int i;
long int n;

int main(void)
{
  Size = 1000000;
  c1 = 5;
  c2 = 3;
  unsigned char a[Size];
  unsigned char b[Size];
  clock_t t0, t1; /* clock_t is defined in <time.h> */

  /* Baseline: two elements per iteration */
  for (i = 0; i < Size; i++)
    {
      a[i] = 2;
      b[i] = 5;
    }
  t0 = clock();
  for (n = 0; n < 600; n++)
    {
      for (i = 0; i <= Size - 2; i = i + 2)
        {
          a[i]   = b[i]   + c1;
          a[i+1] = b[i+1] * c2;
        }
    }
  t1 = clock();
  printf("CPU time:        %f\n", (float)(t1 - t0) / CLOCKS_PER_SEC);

  /* 4x unrolled: eight elements per iteration */
  for (i = 0; i < Size; i++)
    {
      a[i] = 2;
      b[i] = 5;
    }
  t0 = clock();
  for (n = 0; n < 600; n++)
    {
      for (i = 0; i <= Size - 8; i = i + 8)
        {
          a[i]   = b[i]   + c1;
          a[i+1] = b[i+1] * c2;
          a[i+2] = b[i+2] + c1;
          a[i+3] = b[i+3] * c2;
          a[i+4] = b[i+4] + c1;
          a[i+5] = b[i+5] * c2;
          a[i+6] = b[i+6] + c1;
          a[i+7] = b[i+7] * c2;
        }
    }
  t1 = clock();
  printf("4x CPU time:        %f\n", (float)(t1 - t0) / CLOCKS_PER_SEC);

  /* 10x unrolled: twenty elements per iteration */
  for (i = 0; i < Size; i++)
    {
      a[i] = 2;
      b[i] = 5;
    }
  t0 = clock();
  for (n = 0; n < 600; n++)
    {
      for (i = 0; i <= Size - 20; i = i + 20)
        {
          a[i]    = b[i]    + c1;
          a[i+1]  = b[i+1]  * c2;
          a[i+2]  = b[i+2]  + c1;
          a[i+3]  = b[i+3]  * c2;
          a[i+4]  = b[i+4]  + c1;
          a[i+5]  = b[i+5]  * c2;
          a[i+6]  = b[i+6]  + c1;
          a[i+7]  = b[i+7]  * c2;
          a[i+8]  = b[i+8]  + c1;
          a[i+9]  = b[i+9]  * c2;
          a[i+10] = b[i+10] + c1;
          a[i+11] = b[i+11] * c2;
          a[i+12] = b[i+12] + c1;
          a[i+13] = b[i+13] * c2;
          a[i+14] = b[i+14] + c1;
          a[i+15] = b[i+15] * c2;
          a[i+16] = b[i+16] + c1;
          a[i+17] = b[i+17] * c2;
          a[i+18] = b[i+18] + c1;
          a[i+19] = b[i+19] * c2;
        }
    }
  t1 = clock();
  printf("10x CPU time:        %f\n", (float)(t1 - t0) / CLOCKS_PER_SEC);

  return 0;
}

As I understand it, the program processes an array of 1,000,000 elements 600 times: the first loop handles two elements per iteration, the second eight (4x unrolled), and the third twenty (10x unrolled).
I ran the program in a virtual machine and got the following results. Why is there a difference?
CPU time: 2.506459
4x CPU time: 2.087346
10x CPU time: 1.991994



1 answer
jajabobo, 2020-12-10

Firstly, the same code running under an OS (let alone inside a virtual machine) can take widely varying amounts of time, depending on a million factors. Secondly, you are running different code in each case, so why would you expect there to be no difference at all?
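For what it's worth, the two points can be separated experimentally. Here is a minimal sketch, assuming the same clock()-based timing as in the question, of how one might filter out the OS/VM noise from the first point: run each measurement several times and keep the minimum, since the shortest run is the one least disturbed by the scheduler. The kernel() function and the repetition count REPS below are illustrative assumptions, not part of the original program.

#include <stdio.h>
#include <time.h>

#define SIZE 1000000
#define REPS 5

static unsigned char a[SIZE], b[SIZE];

/* Illustrative stand-in for one of the question's timed loops
   (the baseline two-elements-per-iteration version). */
static void kernel(void)
{
    long i, n;
    for (n = 0; n < 600; n++)
        for (i = 0; i <= SIZE - 2; i += 2) {
            a[i]   = b[i]   + 5;
            a[i+1] = b[i+1] * 3;
        }
}

int main(void)
{
    double best = 1e30;
    long i;
    int r;

    for (i = 0; i < SIZE; i++) {
        a[i] = 2;
        b[i] = 5;
    }
    for (r = 0; r < REPS; r++) {
        clock_t t0 = clock();
        kernel();
        double seconds = (double)(clock() - t0) / CLOCKS_PER_SEC;
        if (seconds < best)
            best = seconds; /* keep the least-disturbed run */
    }
    printf("best of %d runs: %f s\n", REPS, best);
    return 0;
}

With the noise filtered this way, whatever difference remains is more likely to reflect the second point: the unrolled loops execute the counter increment, comparison, and branch far fewer times per pass over the array.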
