Why would you want to, though? You can get algorithm examples in more recent languages.
troff is still very much alive, as groff and still used for man pages.
It is just a procedural language, like C, or Algol, or Pascal, or anything similar. Nothing really oddball about it.
But yes, you should understand the examples in the book quite easily.
dcl some_variable fixed bin (17);
dcl some_array (10) fixed binary;
/*
Copyright 2015-2016 by Charles Anthony
All rights reserved.
This software is made available under the terms of the
ICU License -- ICU 1.8.1 and later.
See the LICENSE file at the top-level directory of this distribution and
at https://sourceforge.net/p/dps8m/code/ci/master/tree/LICENSE
*/
/* cload: reload directories and segments from a backup tape attached
   via the tape_nstd_ (non-standard tape) DIM.

   Usage:  cload tape_label

   Tape format handled (deduced from the read loop below; confirm
   against the program that writes these tapes):
     - a label record, then a tape mark;
     - for each object, a header record (structure h below) whose
       type field is 0 (directory), 1 (segment) or 2 (end of tape);
       for a segment the raw data records follow; each object is
       terminated by a tape mark.
   Directories are created with hcs_$append_branchx and segments with
   hcs_$make_seg, all under the current working directory. */
cload:
procedure;
/*
dcl ios_$attach entry (char (*) aligned, char (*), char (*), char (*), bit (72) aligned);
dcl com_err_ entry options (variable);
dcl card_device char (32) init ("rdrb");
dcl card_dim char (32) init ("crz");
dcl status bit (72) aligned;
dcl 1 stat aligned based (addr (status)),
2 word1 fixed bin (35),
2 iocode fixed bin (35);
*/
/* iox_ I/O system entries used to read records from the tape */
dcl iox_$attach_ioname entry (char (*), ptr, char (*), fixed bin (35));
dcl iox_$open entry (ptr, fixed bin, bit (1) aligned, fixed bin (35));
dcl iox_$read_record entry (ptr, ptr, fixed bin (21), fixed bin (21),
fixed bin (35));
dcl iox_$close entry (ptr, fixed bin (35));
dcl iox_$detach_iocb entry (ptr, fixed bin (35));
/* NOTE(review): iox_$get_chars is declared but never called below */
dcl iox_$get_chars entry (ptr, ptr, fixed bin(21), fixed bin(21),
fixed bin(35));
/* hcs_ storage-system entries used to recreate the hierarchy */
/* NOTE(review): hcs_$append_branch is declared but never called below */
dcl hcs_$append_branch entry (char(*), char(*), fixed bin(5),
fixed bin(35));
dcl hcs_$append_branchx entry (char(*), char(*), fixed bin(5),
(3) fixed bin(6), char(*), fixed bin(1), fixed bin(1),
fixed bin(24), fixed bin(35));
dcl hcs_$make_seg entry (char(*), char(*), char(*), fixed bin(5),
ptr, fixed bin(35));
dcl hcs_$set_bc_seg entry (ptr, fixed bin(24), fixed bin(35));
dcl hcs_$terminate_noname entry (ptr, fixed bin(35));
/* NOTE(review): the "entry" attribute appears to be missing here --
   compare the get_wdir_ declaration just below; confirm intended */
dcl get_group_id_$tag_star returns (char (32) aligned);
dcl get_wdir_ entry returns (character (168));
dcl ioa_ entry options (variable);
dcl com_err_ entry options (variable);
dcl cu_$arg_ptr entry (fixed bin, ptr, fixed bin, fixed bin (35));
dcl cu_$arg_count entry (fixed bin);
dcl error_table_$end_of_info fixed binary (35) external; /* status returned at a tape mark */
dcl iocbp ptr; /* I/O control block for the attached tape switch */
/* NOTE(review): no base given, so standard PL/I defaults this to fixed
   DECIMAL; every status argument above is fixed bin (35) -- confirm
   whether "fixed bin (35)" was intended here */
dcl code fixed (35);
dcl buffer char (4096); /* scratch buffer for label, marks and headers */
dcl p ptr; /* -> buffer for the record currently being read */
dcl segp ptr; /* -> segment currently being recreated */
dcl char_count fixed bin (21); /* characters actually returned by iox_$read_record */
dcl bc fixed bin (24); /* bits remaining to copy for the current segment */
dcl this_read_bits fixed bin; /* size in bits of the next data record */
dcl this_read_chars fixed bin (21); /* size in characters of the next read */
dcl path character (168); /* pathname of the containing directory */
dcl bn fixed bin; /* index of the next 4096-character block in the segment */
dcl argp ptr,
argln fixed bin (17),
arg char (argln) based (argp) unaligned, /* the tape_label command argument */
count fixed bin;
/* Header record preceding each object on the tape; the original
   trailing comments give the word offset of each member. */
dcl 1 h based aligned,
2 type fixed binary, /* 0 */ /* 0 = directory, 1 = segment, 2 = end of tape */
2 bitcnt fixed binary, /* 1 */ /* bit count of the segment's contents */
2 dlen fixed binary, /* 2 */ /* used length of dname (0 = working dir itself) */
2 dname character (168), /* 3 */ /* directory path relative to the working dir */
2 elen fixed binary, /* 45 */ /* used length of ename */
2 ename character (32); /* 46 */ /* entry name to create */
/* 54 */
/* Overlay: a segment viewed as up to 255 blocks of 4096 characters */
dcl 1 s based aligned,
2 block (1:255) char (4096);
/* Overlay: the first 256 characters of a block; zeroed before each
   data read so a short record leaves defined padding behind it */
dcl 1 minbuf based aligned,
2 buf char (256);
dcl rings (3) fixed binary (6); /* ring brackets for created directories */
call cu_$arg_count (count);
if count ^= 1 then do;
call ioa_ ("Usage cload tape_label");
goto done;
end;
call cu_$arg_ptr (1, argp, argln, code);
rings (1) = 4;
rings (2) = 4;
rings (3) = 4;
/* Attach the tape; the attach description is "tape_nstd_ <label>" */
call iox_$attach_ioname ("tape_switch", iocbp, "tape_nstd_ " || arg, code);
if code ^= 0 then do;
call com_err_ (code, "cload", "attach_ioname");
goto done;
end;
call iox_$open (iocbp, Sequential_input, "0"b, code);
if code ^= 0 then do;
call com_err_ (code, "cload", "open");
goto done1;
end;
p = addr (buffer);
/* skip the label and first mark */
call iox_$read_record (iocbp, p, 4096, char_count, code);
/*call ioa_ ("read label char_count ^d code ^d ^w", char_count, code, p -> h.type); */
if code ^= 0 then
call com_err_ (code, "cload", "read label");
call iox_$read_record (iocbp, p, 4096, char_count, code);
/*call ioa_ ("read label mark char_count ^d code ^d ^w", char_count, code, p -> h.type);*/
/* a tape mark reads back as end_of_info with no data */
if code ^= error_table_$end_of_info | char_count ^= 0 then
call com_err_ (code, "cload", "read label mark");
/* Loop over segments */
loop:
/* Read the header */
call iox_$read_record (iocbp, p, 4096, char_count, code);
/* call ioa_ ("read code ^d char_count ^d type ^w bitcnt ^w", code, char_count, p -> h.type, p -> h.bitcnt); */
if code ^= 0 then do;
call com_err_ (code, "cload", "read header");
goto done2;
end;
if p -> h.type = 0 then do; /* dir */
call ioa_ ("DIR: ^a>^a", p -> h.dname, p -> h.ename);
/* Build the containing directory path under the working directory;
   dlen = 0 means the entry goes directly in the working directory. */
if p -> h.dlen ^= 0 then
path = rtrim (get_wdir_ ()) || ">" || substr (p -> h.dname, 1, p -> h.dlen);
else
path = rtrim (get_wdir_ ());
/* call ioa_ ("^a", path); */
call hcs_$append_branchx (path,
substr (p -> h.ename, 1, p -> h.elen),
1011b, /* sma */
rings,
(get_group_id_$tag_star ()), /* user_id */
1b, /* dirsw */
0b, /* copy_sw */
0, /* bit_count */
code);
if code ^= 0 then do;
call com_err_ (code, "cload", "append_branchx");
goto done2;
end;
end;
else if p -> h.type = 1 then do; /* seg */
call ioa_ ("SEG: ^a>^a", p -> h.dname, p -> h.ename);
call hcs_$make_seg (rtrim (get_wdir_ ()) || ">" || substr (p -> h.dname, 1, p -> h.dlen),
substr (p -> h.ename, 1, p -> h.elen),
"", /* ref_name */
1110b, /* rew */
segp,
code);
if code ^= 0 then do;
call com_err_ (code, "cload", "make_seg");
goto done2;
end;
bc = p -> h.bitcnt;
call hcs_$set_bc_seg (segp, bc, code);
if code ^= 0 then do;
call com_err_ (code, "cload", "set_bc_seg");
goto done2;
end;
bn = 0;
/* Copy the segment contents, one tape record per 4096-character block */
dloop:
if (bc > 0) then do;
/* Calculate the number of bits in the next block */
this_read_bits = bc;
if this_read_bits > 4096 * 9 then
this_read_bits = 4096 * 9;
this_read_chars = this_read_bits / 9; /* 9-bit characters */
/* minimum buffer size */
if this_read_chars < 256 then
this_read_chars = 256;
bn = bn + 1;
p = addr (segp -> s.block (bn));
/* zero out the minimum buffer size to handle short reads */
p -> minbuf.buf = char (0);
/* Read a block of data */
call iox_$read_record (iocbp, p, this_read_chars, char_count, code);
if code ^= 0 then do;
call com_err_ (code, "cload", "read data");
goto done2;
end;
/* NOTE(review): bc is decremented by the requested size, not by
   char_count actually read -- confirm records are never short */
bc = bc - this_read_bits;
goto dloop;
end;
call hcs_$terminate_noname (segp, code);
if code ^= 0 then do;
call com_err_ (code, "cload", "terminate data");
goto done2;
end;
end;
else if p -> h.type = 2 then do; /* end */
call ioa_ ("END: ^a>^a", p -> h.dname, p -> h.ename);
goto done2;
end;
else do;
call ioa_ ("Bad value in h.type: ^d", p -> h.type);
goto done2;
end;
/* read eod mark */
p = addr (buffer);
call iox_$read_record (iocbp, p, 4096, char_count, code);
/* call ioa_ ("read eod mark char_count ^d code ^d ^w", char_count, code, p -> h.type); */
if code ^= error_table_$end_of_info | char_count ^= 0 then do;
call com_err_ (code, "cload", "read eod mark");
goto done2;
end;
goto loop;
done2:
call iox_$close (iocbp, code);
done1:
call iox_$detach_iocb (iocbp, code);
done:
return;
%include iox_modes; /* supplies Sequential_input used at the open call */
end;
Talking about procedural languages, I usually write programs in C89/99.
...
Will I understand them? How much PL/I is different from K&R C? :o
If, after a small amount of reading, you can't understand another contemporary procedural language, then you probably aren't competent to use C.
If, after a small amount of reading, you can't understand another contemporary procedural language, then you probably aren't competent to use C.
Well... yes, but I'd temper this a bit. I guess it would all depend on what you mean by "understand". And by "contemporary", I suppose you mean, as in the same period as original C? Or did you mean as in "recent"?
In both cases, a few languages, either from the 60s/70s or recent, are pretty quirky. And not immediately figuring them out is no sign that you aren't competent to use C. Just a thought...
If, after a small amount of reading, can't understand another contempory procedural language, then you probably aren't competent to use C.
Well... yes, but I'd temper this a bit. I guess it would all depend on what you mean by "understand". And by "contemporary", I suppose you mean,as in the same period as original C? Or did you mean as in "recent"?
In both cases, a few languages, either from the 60s/70s or recent, are pretty quirky. And not immediately figuring them out is no sign that you aren't competent to use C. Just a thought...
I was thinking of both originating 50+ years ago, when there were very few alternative programming paradigms except procedural.
I will stand by my statement that back then there was relatively little difference between mainstream procedural languages. Certainly not as much as between future paradigms such as first order predicate logic, relational database, object oriented, discrete event simulation, statistics, FSM, etc languages.
If, after a small amount of reading, can't understand another contempory procedural language, then you probably aren't competent to use C.
Well... yes, but I'd temper this a bit. I guess it would all depend on what you mean by "understand". And by "contemporary", I suppose you mean,as in the same period as original C? Or did you mean as in "recent"?
In both cases, a few languages, either from the 60s/70s or recent, are pretty quirky. And not immediately figuring them out is no sign that you aren't competent to use C. Just a thought...
I was thinking of both originating 50+ years ago, when there were very few alternative programming paradigms except procedural.
I will stand by my statement that back then there was relatively little difference between mainstream procedural languages. Certainly not as much as between future paradigms such as first order predicate logic, relational database, object oriented, discrete event simulation, statistics, FSM, etc languages.
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
First: Forth. Yes, it's actually classified as a procedural language.
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Now of course, if we consider more recent languages, there are quite a few that are a bit hard to decipher, even though they are still procedural, although most of those would, admittedly, not be widely popular.
If, after a small amount of reading, can't understand another contempory procedural language, then you probably aren't competent to use C.
Well... yes, but I'd temper this a bit. I guess it would all depend on what you mean by "understand". And by "contemporary", I suppose you mean,as in the same period as original C? Or did you mean as in "recent"?
In both cases, a few languages, either from the 60s/70s or recent, are pretty quirky. And not immediately figuring them out is no sign that you aren't competent to use C. Just a thought...
I was thinking of both originating 50+ years ago, when there were very few alternative programming paradigms except procedural.
I will stand by my statement that back then there was relatively little difference between mainstream procedural languages. Certainly not as much as between future paradigms such as first order predicate logic, relational database, object oriented, discrete event simulation, statistics, FSM, etc languages.
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
Instant thoughts are APL, Snobol and Teco :)
Quote: First: Forth. Yes, it's actually classified as a procedural language.
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Now of course, if we consider more recent languages, there are quite a few that are a bit hard to decipher, even though they are still procedural, although most of those would, admittedly, not be widely popular.
Smalltalk-80 definitely isn't procedural; it is object oriented with a vengeance! The earlier Smalltalks are more ambiguous but were internal research languages. The only earlier OOP language I'm aware of is Simula.
Forth definitely is procedural.
Back in the day, there was a horribly believable urban myth that Word for Windows was actually written — evolved, rather — in Prolog...
Back in the day, there was a horribly believable urban myth that Word for Windows was actually written in Prolog...
Speaking as someone who has delved into the intricacies of PL/I, I am
sure that only Real Men could have written such a machine-hogging,
cycle-grabbing, all-encompassing monster. Allocate an array and free
the middle third? Sure! Why not? Multiply a character string times a
bit string and assign the result to a float decimal? Go ahead! Free a
controlled variable procedure parameter and reallocate it before
passing it back? Overlay three different types of variable on the same
memory location? Anything you say! Write a recursive macro? Well,
no, but Real Men use rescan. How could a language so obviously
designed and written by Real Men not be intended for Real Man use?
I think that a C user will find PL/I somewhat alien, but would suggest that the learning experience will be very much worthwhile.
The traditional FORTRAN school contributed not only handy calculation-by-formula but also automatic calculation with whole arrays of data, one of the main virtues of the language APL. The COBOL camp contributed very powerful I/0 formatting, particularly through an option called PICTURE; formatting using the PICTURE option allows such nice things as floating dollar signs and commas in numbers (BASIC programmers have a crude form of this feature in the PRINT USING statement). The ALGOL language (which isn't widely known these days) contributed such features as a free format for program layout, and block structuring, which helps us make sure that one part of a program doesn't interfere with another. Anyone who is familiar with Pascal and C is accustomed to these virtues; they were not only uncommon when PL/I was created, but PL/I's versions of these features are probably better than those of any language created since. The final element, which I called new topics, consisted of several abilities that had not existed before in major programming languages. These included support of interrupts (similar to BASIC's ON KEY and ON ERROR statements), use of macros and program libraries, and other goodies of that ilk.
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
...
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Smalltalk-80 definitely isn't procedural; it is object oriented with a vengence!
I can think of some that look like garbage even if you do know them. APL and Lisp, I'm looking at you both.
Smalltalk is definitely a procedural language. Versions were Smalltalk-80 and Smalltalk-74, both a bit outside the '50 year old' criteria.
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
I can think of some that look like garbage even if you do know them. APL and Lisp, I'm looking at you both.Quote...
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Smalltalk is definitely a procedural language. Versions were Smalltalk-80 and Smalltalk-74, both a bit outside the '50 year old' criteria.
I can think of some that look like garbage even if you do know them. APL and Lisp, I'm looking at you both.
People think that the weirdest thing about APL is the character set. It's not: the right-to-left evaluation order is far weirder... even if Iverson had a good reason for doing it that way,
QuoteSmalltalk is definitely a procedural language. Versions were Smalltalk-80 and Smalltalk-74, both a bit outside the '50 year old' criteria.
I agree. And something that almost everybody overlooks is that it was defined with a substantial number of "big project" features including enforced naming conventions and internal documentation. But Kay was an extremely clued-up guy, with experience that eclipsed the authors of APL, Lisp and the rest.
Yes he was, but I've never liked the lack of namespaces in Smalltalk. Your project's concept of "Tank" is unlikely to be my project's concept :)
Smalltalk-80 definitely isn't procedural; it is object oriented with a vengeance!
That doesn't make it non-procedural. Procedures being named methods, and procedure calls being named 'sending messages' doesn't stop them from being procedures.
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
I can think of some that look like garbage even if you do know them. APL and Lisp, I'm looking at you both.Quote...
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Smalltalk is definitely a procedural language. Versions were Smalltalk-80 and Smalltalk-74, both a bit outside the '50 year old' criteria.
... only in the sense that you can write Fortran in any language.
The key differentiating point is that the "procedure" is "contained in" the data upon which the method is invoked. That's an inversion of standard procedural semantics.
Which method is executed cannot be known at compile time. It is associated with the "receiving" object and can only be known at runtime via a dynamic lookup. Hence the Smalltalk equivalent of a null pointer exception: Object doesNotUnderstand. (Of course in Smalltalk that can be intercepted and used for some powerful purposes, e.g. proxy objects that forward messages to the "real" recipient.)
It takes real perversity to use Smalltalk procedurally - basically you need a single instance of a single class, with all methods hung off that object.
Yes he was, but I've never liked the lack of namespaces in Smalltalk. Your project's concept of "Tank" is unlikely to be my project's concept :)
That was surely an opportunity for "Bigtalk" :)
Generalizing a bit yes, but I can think of at least a couple languages that would certainly be pretty far from C, and even probably looking like garbage if you don't know anything about them.
I can think of some that look like garbage even if you do know them. APL and Lisp, I'm looking at you both.Quote...
Then there would be Smalltalk. I'm not completely sure it qualifies as a "procedural language", although it's a bit hard to tell. I'll leave it to your appreciation.
Smalltalk is definitely a procedural language. Versions were Smalltalk-80 and Smalltalk-74, both a bit outside the '50 year old' criteria.
... only in the sense that you can write Fortran in any language.
The key differentiating point is that the "procedure" is "contained in" the data upon which the method is invoked. That's an inversion of standard procedural semantics.
No, the procedure is not contained in the data, it is [indirectly] contained in the method dictionary of the class that a particular object belongs to.
Standard procedural semantics is that one has types and has procedural operations on those types, Smalltalk just makes it easier to extend those types and their operators. It is still all done procedurally, there is no inference, there is no functional composition.
QuoteWhich method is executed cannot be known at compile time. It is associated with the "receiving" object and can only be known at runtime via a dynamic lookup. Hence the Smalltalk equivalent of a null pointer exception: Object doesNotUnderstand. (Of course in Smalltalk that can be intercepted and used for some powerful purposes, e.g. proxy objects that forward messages to the "real" recipient.)
It takes real perversity to use Smalltalk procedurally - basically you need a single instance of a single class, with all methods hung off that object.
Procedural programming languages are ones in which one writes procedures: "Do this, if that, then do this" and so on. This is what one does in Smalltalk. Run time binding does not make "anObject add: anotherObject" un-procedural, it just means that the programmer doesn't have to manage the bookkeeping as to which particular procedure gets invoked to deal with what type "anObject" is at the moment. Contrast with functional programming languages. If you still want to insist that Smalltalk is not procedural, please provide an example in Smalltalk that does not "follow a procedure" to get the result that one is looking for.
Yes he was, but I've never liked the lack of namespaces in Smalltalk. Your project's concept of "Tank" is unlikely to be my project's concept :)
There are namespaces in Smalltalk-80 but they are not exposed in the language, they are part of the interactive programming environment. They are also implied by superclass membership. "Tank" that is a subclass of "FluidContainer" is not the same [class] object as "Tank" that is a subclass of "Weapon".
My personal disliked feature of Smalltalk is that it doesn't really have a form outside of the interactive programming environment - there is no defined denotation for "here is a class definition for 'Child' that is a subclass of 'Parent'". One or two implementations have defined methods that kind of let you write this out as a standalone program or algorithm, but none of them was quite there. It's very difficult to use Smalltalk to describe your algorithms in a paper in the way that Algol used to be used for that purpose. Similarly it's difficult to zip up an application in Smalltalk and pass it to someone else to install in the way one can the same in C, C++, python, whatever. Solving that problem properly does indeed require the formal introduction of some form of namespaces.
You appear to be confusing/conflating functional programming with object oriented programming. Either, of course, can be used to implement the other.
procedure | prəˈsiːdʒə |
noun
...
• a series of actions conducted in a certain order or manner
Correct, but that misses the point of OOP. In OOP the object/data defines the operations/procedures that can be performed on it.
In procedural programming the procedure's signature defines the type of data on which it can operate.
You appear to be confusing/conflating functional programming with object oriented programming. Either, of course, can be used to implement the other.
No, I'm not. I'm contrasting procedural programming languages with other programming language types such as functional, inferential etc. OOP is just another procedural language structuring device, it doesn't remove the procedural nature of OOP.
Quote from dictionary: procedure | prəˈsiːdʒə |
noun
...
• a series of actions conducted in a certain order or manner
Quote: Correct, but that misses the point of OOP. In OOP the object/data defines the operations/procedures that can be performed on it.
If they "define the operations/procedures", how are they not procedural, given you've just chosen the term "procedure" to describe them?
Quote: In procedural programming the procedure's signature defines the type of data on which it can operate.
No, that's not what one means by procedural programming: in procedural programming one writes procedures; one doesn't leave the programming language to decide for itself how to solve the problem, as one does in functional languages, inferential languages and so on.
You're using your own definition of procedural programming, apparently to mean "contains something called a procedure". If there's conflation going on here, it's you conflating the 'procedure' in "procedural language" and the 'procedure' that is one of the many names for a packaged sequence of program steps, also known as a routine, subroutine or function.
Yes he was, but I've never liked the lack of namespaces in Smalltalk. Your project's concept of "Tank" is unlikely to be my project's concept :)
There are namespaces in Smalltalk-80 but they are not exposed in the language, they are part of the interactive programming environment. They are also implied by superclass membership. "Tank" that is a subclass of "FluidContainer" is not the same [class] object as "Tank" that is a subclass of "Weapon".
I wondered if someone would raise that canard. What if I replaced "Tank" with "Person"? What should be its superclass? IMNSHO, a fluid containing Tank should have an instance variable of type Cuboid, whereas a vehicular Tank could reasonably be a subclass of Vehicle with instance variable "Bore".
OOP neophytes tend to overuse inheritance where composition is preferable. I have even seen a project where Currency was defined as a subclass of Integer, whereas it would have been better defined as a subclass of Object with an instance variable of type Integer.
OOP neophytes tend to overuse inheritance where composition is preferable. I have even seen a project where Currency was defined as a subclass of Integer, whereas it would have been better defined as a subclass of Object with an instance variable of type Integer.
One who knows the Smalltalk class hierarchy well probably would start with a subclass of Magnitude, likely Fraction and extend it with an instance variable for scale as a starting point and override behaviour such that the scaling was handled transparently. Where there is shared behaviour with a superclass one should subclass to gain the benefit of inheriting all that preprogrammed behaviour*, starting at Object and inheriting almost bugger all behaviour and no arithmetic operations (not even add:) wouldn't be very helpful for a currency class where comparison, ordering operators and arithmetic might prove to be rather useful. :)
*Truthfully, there's not much in Magnitude, just < <= > >= and between:and: but it's a logical place to start.
I suggest you read, learn and inwardly digest the entirety of https://en.wikipedia.org/wiki/Object-oriented_programming
In procedural programming the procedures specify the types of data on which they can operate.
In object oriented programming the data specifies the procedures that can operate on it.
I hope you can see the essential difference there. Data-centric vs operation-centric.
OOP neophytes tend to overuse inheritance where composition is preferable. I have even seen a project where Currency was defined as a subclass of Integer, whereas it would have been better defined as a subclass of Object with an instance variable of type Integer.
One who knows the Smalltalk class hierarchy well probably would start with a subclass of Magnitude, likely Fraction and extend it with an instance variable for scale as a starting point and override behaviour such that the scaling was handled transparently. Where there is shared behaviour with a superclass one should subclass to gain the benefit of inheriting all that preprogrammed behaviour*, starting at Object and inheriting almost bugger all behaviour and no arithmetic operations (not even add:) wouldn't be very helpful for a currency class where comparison, ordering operators and arithmetic might prove to be rather useful. :)
*Truthfully, there's not much in Magnitude, just < <= > >= and between:and: but it's a logical place to start.
No, you wouldn't.
You would define a Class Currency (superclass Object) that "has-a" (==instance variable) value which is an Integer (etc). Relevant arithmetic operations would then be delegated to the value. Irrelevant operations would not appear in the Currency's signature.
Delegation is a key OOP concept, one that is very powerful.
"Your" inheritance technique would allow all sorts of irrelevant operations on Currency, e.g. taking the cosine of it! Preventing that would break the vitally important Liskov Substitution Principle.
I suggest you read, learn and inwardly digest the entirety of https://en.wikipedia.org/wiki/Object-oriented_programming
In procedural programming the procedures specify the types of data on which they can operate.
In object oriented programming the data specifies the procedures that can operate on it.
I hope you can see the essential difference there. Data-centric vs operation-centric.
That's fun. =)
OOP can actually be more or less implemented on top of various paradigms. It's not really a paradigm in itself. Well, not so much anyway.
OCaml, for instance, is OO but also essentially functional. I think Scala as well.
And yes, the most common implementations of OOP are usually procedural - they tie data structures to methods, which are nothing but procedures.
Smalltalk is a bit of a weird animal, but I would also have considered it procedural. Now I admit I am not a Smalltalk expert.
But interestingly enough, OOP is frequently depicted (in many definitions even) as a paradigm in itself, on the same "level" as procedural, or functional. Which I don't quite agree with either. But if you stick to those commonly seen definitions, you'll naturally be inclined to claim they are different.
In procedural programming the procedures specify the types of data on which they can operate.
In object oriented programming the data specifies the procedures that can operate on it.
I suggest you read, learn and inwardly digest the entirety of https://en.wikipedia.org/wiki/Object-oriented_programming
In procedural programming the procedures specify the types of data on which they can operate.
In object oriented programming the data specifies the procedures that can operate on it.
I hope you can see the essential difference there. Data-centric vs operation-centric.
Wikipedia is not necessarily the last word in definitive knowledge.
"Procedural programming", to me and others here, means explicitly specifying the steps to be followed and in what order. Normal object-oriented programming languages such as Smalltalk and C++ and Java are a subset of procedural programming, not something different. Languages such as Lisp and Scheme are a different subset of procedural programming.
Prolog, SNOBOL, Haskell are examples of non-procedural languages.
This is all departing a long way from OP's question, and by now is probably telling him nothing he doesn't know.
Quote: In procedural programming the procedures specify the types of data on which they can operate.
In object oriented programming the data specifies the procedures that can operate on it.
I am extremely uncomfortable with that as a definition, and suggest that you're confusing the concepts of "named procedure with predefined parameter types" and "procedure comprising a specific series of operations".
As others have pointed out, Smalltalk's methods are basically procedures, even if the parameters are specified unconventionally (by comparison with contemporary mainstream languages). And most if not all conventional languages with OO pretensions have an implicit parameter referring to the object being manipulated. There really is no distinct dividing line.
On the other hand are things like Prolog, and SQL queries. But even in those cases there are usually facilities for embedding specific procedural sequences.
I was once discussing Prolog and Japan's "5th Generation" project, and pointed out that the implementers had to tack on a substantial amount (the details are largely lost) of procedural capability to make it usable for systems programming. However somebody made the thought-provoking comment that it would probably be possible to handle e.g. an interrupt with adequately detailed Prolog-style rules: "We've got an interrupt, change the state of the system so that there is no longer an interrupting condition".
But in any event, the distinctions between the various paradigms (procedural, OO, functional, inferential and so on) is extremely vague, and I'd suggest that more than anything else they're the result of turf wars in academic research and laziness in academic teaching.
That is internally consistent, but not with the common usage of the term.
Well, everything is defined by the sequence of program counter states!
Prolog is very powerful in a limited domain. Two fundamental problems are side effects (e.g. I/O) and the closed world assumption. The former also arises with pure functional languages.
There are certainly grey areas between the various paradigms, and the pure academics can have fun debating them. And that's without considering that they are, like C++ Templates, Turing complete :)
...and the water is muddied enormously when a fairly pure initial implementation is polluted by extensions which depart far from the original paradigm since the implementor considers it easier to tack in a bit of OO or procedural parsing than to be able to link with other languages. A classic example of that was Turbo Prolog.
MarkMLl
[..]You're discounting FORTH and the whole LISP family of languages there. But yes, outside of those, that capability was (for the better or worse) rare.
And for that matter, there were few if any languages where the underlying syntax could be changed until the 2010s: while e.g. C++ allows operators to be overloaded it's not generally possible to add a novel operator or define a novel flow control syntax on whim.
[..]
Well, everything is defined by the sequence of program counter states!
Until one starts thinking about parallel systems, where the program counters exhibit not merely different sequences but might be subject to vastly different clock rates.
And without intending to imply any cause-or-effect relationship, that's particularly the case in an OO system or one where different subproblems can be "farmed out" (e.g. multithreaded searching in the logic programming domain).
Quote: Prolog is very powerful in a limited domain. Two fundamental problems are side effects (e.g. I/O) and the closed world assumption. The former also arises with pure functional languages.
I agree, but one problem is that there are always people who are prepared to push a paradigm or a language implementation far beyond what is reasonable. I remember once settling down to read a paper discussing the use of APL for astronomical calculations expecting that the author had hit upon some way of describing planetary orbits in matrix form, only to find that it was a transcript of Meeus's standard numerical methods which could have been better done in BASIC... or even COBOL.
Quote: There are certainly grey areas between the various paradigms, and the pure academics can have fun debating them. And that's without considering that they are, like C++ Templates, Turing complete :)
...and the water is muddied enormously when a fairly pure initial implementation is polluted by extensions which depart far from the original paradigm since the implementor considers it easier to tack in a bit of OO or procedural parsing than to be able to link with other languages. A classic example of that was Turbo Prolog.
MarkMLl
I never used Turbo Prolog but I remember seeing it announced and I'd written a few Prolog programs before that. I don't know anything about cancerous growth, but what I do remember from the announcement was that it was missing at least one absolutely fundamental standard Prolog feature! Was it assert & retract?
I have, however, sold and supported a fair number of development tools, and have always been disappointed by how many companies attempted to have cross-language linkability and how quickly they folded afterwards. MS was one of the first to pull it off with .NET, which suffered from starting big leaving people who wanted to get it working truly cross-platform an almost impossible job.
DEC did a pretty usable job on VAX/VMS by (among other things) making the compilers for every language (BASIC, Pascal, FORTRAN, COBOL) able to create and accept function arguments using all the calling conventions native to any of the other languages. Providing high level machine code procedure call instructions probably helped that too, even if it meant the actual performance was not as good as the older PDP 11/70 -- or for that matter using the VAX instruction set in a more RISCy way.
** El Reg would come up with something like Totally Incapable To Support Unexpected Paradigms to explain that.
Prolog is pretty dead nowadays. Why?
Prolog is pretty dead nowadays. Why?
One-trick pony.
** El Reg would come up with something like Totally Incapable To Support Unexpected Paradigms to explain that.
Since you mention that filthy rag ... ahhh, the old days of Magee and Lettice ... back when I was a 3rd year university student in Hamilton NZ in 1983 I got friendly with a 1st year student called Simon in one of the programming classes I was tutoring. Simon, myself, and a masters' student (Lawrence D'Oliviero) ended up having lunch together most days. Simon didn't get along with studies and dropped out and later got a job as a trainee operator on the university VAXes. (As I recall, he worked in retail at Dick Smith Electronics in between) He also started writing a satirical column for the student newspaper about the frustrations of the operator job. The rest is history.
Prolog is pretty dead nowadays. Why?
I'd like to learn and use Ruby. Unfortunately, I haven't yet found a book with Ruby examples. Maybe it depends on my domain of interests.
I'd like to learn and use Ruby. Unfortunately, I haven't yet found a book with Ruby examples. Maybe it depends on my domain of interests.
What would you like to implement with it?
Prolog is pretty dead nowadays. Why?
Have you taken a look?
What is Stood-2010, and what part was written in Prolog?
I like Ruby a lot more than Python and should be using it for my quick-and-dirty stuff, but my fingers still effortlessly flow out Perl for that.
... should be using it for my quick-and-dirty stuff, but my fingers still effortlessly flow out Perl for that.
The Perl community got the language a terminally-bad reputation by being excessively clever: uncommented one-liners for the sake of it.
perl -pe '$_ = " $_"' foo.c
What is Stood-2010, and what part was written in Prolog?
The one related to the project design and constraints analysis , but I am not expert, and I cannot publish sources. Basically, the Prolog modules are
(snip)
80% is written in Prolog. All the processing core.
edit:
there is also a "C-code reverse engineering" module written in Prolog, but it doesn't compile.
there is also a "C-code reverse engineering" module written in Prolog, but it doesn't compile.:popcorn:
Sure Prolog is not bad for that, although I think there are better solutions these days for rule-based systems and logic proofing.
I'm torn over the desirability of fancy application-specific notations which are completely unlike conventional programming languages. On the one hand they have the desirable property of being familiar to their users, while on the other they provide an excuse for academics to waste years coming up with efficient and reliable parsers.
This is more often seen in the form of Domain Specific Language vs Domain Specific Library.
It seems that all computer science graduates hanker after creating a language, just as electronic engineer graduates want to create a processor.
It is sometimes not appreciated that the reason the intel 8080 won over most of the competition was not because of the (mediocre) silicon, it was because intel provided an ecosystem of boards, languages, and development systems.
It is sometimes not appreciated that the reason the intel 8080 won over most of the competition was not because of the (mediocre) silicon, it was because intel provided an ecosystem of boards, languages, and development systems.
Although BCPL is more often described as being a descendant of CPL, hence of ALGOL, making it a sibling of PL/I rather than a descendant.
Considered as a system implementation language, BCPL was also a contemporary of Wirth's PL/360 at Stanford.
MarkMLl
It is sometimes not appreciated that the reason the intel 8080 won over most of the competition was not because of the (mediocre) silicon, it was because intel provided an ecosystem of boards, languages, and development systems.
The 8080 was losing ground to Intel's own 8048 on one hand and Zilog's Z80 on the other... not to mention Motorola's 68k. The 8086 was widely regarded as lackluster with many potential customers hanging on for Zilog's promised Z800 (running at an unprecedented 20MHz), the thing that saved Intel was IBM's adoption of the 8088 in an unprecedentedly-open architecture which meant that developers no longer had to shell out for Intel blue boxes or hunt for a spare ASR33.
MarkMLl
The definitive statement on this would come from Martin Richards the designer of BCPL, who is still alive and kicking. The paper "How BCPL evolved from CPL" (https://www.cl.cam.ac.uk/~mr10/cpl2bcpl.pdf) would seem, just in its title, to be a pretty authoritative statement on the matter. CPL had its conceptual origins in 1961, PL/1 in 1963, and BCPL in 1966.
Although BCPL is more often described as being a descendant of CPL, hence of ALGOL, making it a sibling of PL/I rather than a descendant.
Considered as a system implementation language, BCPL was also a contemporary of Wirth's PL/360 at Stanford.
MarkMLl
The definitive statement on this would come from Martin Richards the designer of BCPL, who is still alive and kicking. The paper "How BCPL evolved from CPL" (https://www.cl.cam.ac.uk/~mr10/cpl2bcpl.pdf) would seem, just in its title, to be a pretty authoritative statement on the matter.
let w ≡ xx + 2xy + yy
Every time w is evaluated it recomputes xx + 2xy + yy
using the current values of x and y. In CPL, xx means x
multiplied by x. Multi-character identifiers had to start
with capital letters.
I've been thinking for a while now that the only reasonable use of the "let" keyword in a programming language should be exactly this: defining substitutions of expressions that hold true at all times, just like the way we use "let" in math. (But the CPL language required the '≡' symbol instead of '=', which I don't particularly like here. To me, that should be the meaning of "let".)
Using "let" merely as a way to declare, and initialize a variable, is wrong on every level IMHO.
strong dislike of the use of '=' as an assignment operator; I much prefer the Algol 68 ':=' (read out loud as 'becomes'), or the backarrow character '←' for assignment. I dislike backarrow simply because it shows up poorly in many fonts and we don't have it on keyboards nowadays (We did in the past, but it got usurped by the underscore.).
a = b == c = d;
Using "let" merely as a way to declare, and initialize a variable, is wrong on every level IMHO.
But the ability of defining substitutions would be tremendously useful and help with code clarity and code correctness. I don't think I have seen this in a modern language. Please point me to one if there is - certainly there are some of them that I don't know well, especially the functional languages.
I've been thinking for a while now that the only reasonable use of the "let" keyword in a programming language should be exactly this: defining substitutions of expressions that hold true at all times, just like the way we use "let" in math. (But the CPL language required the '≡' symbol instead of '=', which I don't particularly like here. To me, that should be the meaning of "let".)
Using "let" merely as a way to declare, and initialize a variable, is wrong on every level IMHO.
You won't find me disagreeing with you there. I think the Cambridge computer scientists were mathematicians first, and that tends - as you note - toward that interpretation of the word 'let'. In a similar vein, I have a strong dislike of the use of '=' as an assignment operator; I much prefer the Algol 68 ':=' (read out loud as 'becomes'), or the backarrow character '←' for assignment. I dislike backarrow simply because it shows up poorly in many fonts and we don't have it on keyboards nowadays (We did in the past, but it got usurped by the underscore.).
Same here. Of course the back-arrow can just be typed as '<-'. That's usually not hard to read, although it looks better with some fonts than others.
:= was originally a digraph for the arrow.
However, something like <- is going to be an enormous problem if not immediately supported by the character set: having something like x<-2 requires either that a type has already been associated with x and that the lexer has access to the symbol table, or that the lexer starts being sensitive to whitespace to an extent that has usually been avoided.
Same here. Of course the back-arrow can just be typed as '<-'. That's usually not hard to read, although it looks better with some fonts than others.
I'm a fan of overstrikes since they worked well in APL: the worst thing that happened to the language was moving it onto a terminal which tried to have a separate key for each operator e.g. http://www.aplusdev.org/keyboard.gif (http://www.aplusdev.org/keyboard.gif)
However, something like <- is going to be an enormous problem if not immediately supported by the character set: having something like x<-2 requires either that a type has already been associated with x and that the lexer has access to the symbol table, or that the lexer starts being sensitive to whitespace to an extent that has usually been avoided.
MarkMLl
proc double (int x) int =
begin
x * 2
end
PROC double (INT x) INT =
BEGIN
x * 2
END
'PROC' DOUBLE ('INT' X) 'INT' =
'BEGIN'
X * 2
'END'
The first Algol-68 compiler I had access to (on an ICL 1900) used the latter 'stropping' style. Yuch!
Algol-68 has, in its definition, 'large syntactic marks' and 'small syntactic marks' which in presentation form becomes:
Algol-68 has, in its definition, 'large syntactic marks' and 'small syntactic marks' which in presentation form becomes:
I have a theory that ALGOL-68 was sabotaged: it was never meant to succeed.
MarkMLl
int a variable := 1;
(ref int a variable = loc int) := 1;
int a constant = 1;
Algol-68 has, in its definition, 'large syntactic marks' and 'small syntactic marks' which in presentation form becomes:
I have a theory that ALGOL-68 was sabotaged: it was never meant to succeed.
But from what I've seen, ALGOL-68 was just too complex for its time.
Thanks, there's a lot of meat in that. One thing that's somewhat understated is that they appeared to be using "Pascal style" calling convention where the return opcode knows how many operands to pop off the stack, somewhere in between BCPL and C that was changed to allow the caller to use a variable number of parameters.
But from what I've seen, ALGOL-68 was just too complex for its time. So, maybe that was the obvious part that made it unpractical. The tooling was just too hard to write. Wirth went on with Algol-W and then Pascal. Algol-68 died.
Letting Adriaan van Wijngaarden persuade people to use a van Wijngaarden two-level grammar as the definitive version of the language definition was tantamount to sabotage. I know people who can 'out compiler' me before breakfast who can't get their heads around VWF.
But from what I've seen, ALGOL-68 was just too complex for its time. So, maybe that was the obvious part that made it unpractical. The tooling was just too hard to write. Wirth went on with Algol-W and then Pascal. Algol-68 died.
I'm afraid your chronology is wrong there. ALGOL-W preceded ALGOL-68 and was Wirth's proposal.
But still, back to Algol-68: it was definitely too unpractical to see the light, whatever the politics otherwise did to undermine it.
Well, see the light it did.
Algol W had a compiler, and some people actually used it before Pascal was fully spec'ed and compilers available for it.
Well, see the light it did.
OTOH, were those really full implementations of the language as specified? I think I recognise some of the ones you've mentioned as having documented limitations.
I think the bottom line is that a very high proportion of the implementations of "large" languages turn out to be incomplete: ALGOL-68, Ada, and (with a nod to OP, if he's still with us :-) PL/I
Which is possibly an argument for keeping languages small but strict, and moving as much functionality as possible into support libraries.
MarkMLl
My school had a different system. We did FORTRAN on pre-scored cards that we removed chads from using a paper clip or ballpoint pen. The applied maths teacher (who is now 97 and I visited just before Christmas) took them into a local bureau that had a B1700 and ran them through (in person) after the night's processing was finished. I went in with him a few times as I was doing personal programs a lot more complicated than the class assignments and they often needed debugging or incremental improvements.
Ah yes, the B1700. Booted by reading 16 bits at a time from a cassette and treating it as an opcode... which also allowed MTR (Maintenance Test Routine) tapes to do a useful amount of work. Which was needed, since the "Burroughs Transistor Logic" it used got more and more flaky as the company had difficulty sourcing it (I think it was really Fairchild CTL).
Which is possibly an argument for keeping languages small but strict, and moving as much functionality as possible into support libraries.
Yesterday I read chapter 3 and as far as I understand the PL / I examples need to call external functions from libraries to access a single byte. Digging deeper into the reason showed that IO instructions on some machines were only able to access 36 ( :-// ) bits, so functions are needed to manipulate 8 bits.
That's new for me. I am used to use uint8_t, uint16_t and uint32_t directly provided by the C language.
Glad you're still with us and hope you don't find the wibble too distracting :-)
A thing I learned from the book: IBM was one of the first implementing a full B*tree algorithm to manage database records on hard-disk. That's something I have to carefully learn and implement for the firmware of my embroidery machine which is currently memory-only based.
Which is possibly an argument for keeping languages small but strict, and moving as much functionality as possible into support libraries.
Yesterday I read chapter 3 and as far as I understand the PL / I examples need to call external functions from libraries to access a single byte. Digging deeper into the reason showed that IO instructions on some machines were only able to access 36 ( :-// ) bits, so functions are needed to manipulate 8 bits.
That's new for me. I am used to use uint8_t, uint16_t and uint32_t directly provided by the C language.
That was admittedly pretty atrocious for general-purpose use, and hardly the language's fault. Just the hardware.
The ICL machines were widely used at the time, were good, and were certainly good for general purpose uses. But not for C!
It has to be said: widely used in the UK,
And theempirecommonwealth, don't forget. :)
as far as I understand the PL / I examples need to call external functions from libraries to access a single byte. Digging deeper into the reason showed that IO instructions on some machines were only able to access 36 bits. It must be a particularly ancient PL/1 book! By the time PL/1 was catching on, IBM Mainframes were pretty commonly 32bit machines with byte addressability.
It must be a particularly ancient PL/1 book!
This book was printed in 1969. That would do it. It was about 10 years later when the Oil Company IBM shop I had my summer job at was starting to go "IBM is really trying to push PL/1 as a replacement for Fortran; I guess we should start taking that seriously!" (Note that dealing with "bytes" in fortran was also rather challenging!)
Quote: This book was printed in 1969. That would do it. It was about 10 years later when the Oil Company IBM shop I had my summer job at was starting to go "IBM is really trying to push PL/1 as a replacement for Fortran; I guess we should start taking that seriously!" (Note that dealing with "bytes" in fortran was also rather challenging!)
Should it be a surprise that 40 odd years after you were initially taught it, a field that wasn't even 40 years old at the time you were taught it has moved on a bit? No, it shouldn't be. And I wasn't really surprised, after my initial reaction.
do you have a box of 80 column cards going spare? You mean like these? [attachimg=1]
I think I sort-of thought that most of CS was like math - long established stuff
If one thinks computer science has moved on a bit the progress has been glacial compared to biochemistry.
There sure have been a bunch of new algorithms. But do you consider them fundamental advances in CS? That's up for some debate. I tend to consider them "engineering", mostly iterations on existing principles.
I don't think I've ever discovered a new algorithm for anything
There sure have been a bunch of new algorithms. But do you consider them fundamental advances in CS? That's up for some debate. I tend to consider them "engineering", mostly iterations on existing principles.
If you said "proofs" in place of "algorithms" and "mathematics" in place of "CS" you'd have a bunch of mathematicians, including some Fields Medal and Nobel Prize winners hunting your scalp. Algorithms are the lifeblood of computing, and inventing significant ones de novo is definitely a step above engineering.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
And Dylan.
It's pretty simple. Basically the catch block is compiled the same as a nested function (with visibility of local variables in the enclosing function), and added to a list of handlers. When an exception is encountered the handler is searched for and run WITHOUT unwinding the stack first. If the exception can be recovered from then the handler does that and just returns. If it's not recoverable then a non-local GOTO is done to the code after the catch block -- using setjmp/longjmp or a similar mechanism.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
And Dylan.
It's pretty simple. Basically the catch block is compiled the same as a nested function (with visibility of local variables in the enclosing function), and added to a list of handlers. When an exception is encountered the handler is searched for and run WITHOUT unwinding the stack first. If the exception can be recovered from then the handler does that and just returns. If it's not recoverable then a non-local GOTO is done to the code after the catch block -- using setjmp/longjmp or a similar mechanism.
gcc supports nested functions so you could do the same thing in slightly enhanced C if you wanted to do the bookkeeping manually.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
And Dylan.
Yeah, it's a pity that Dylan didn't make it. I thought it had the nice features of a modern Lisp with infix notation (the later releases of Dylan at least) for mass-appeal.
It's pretty simple. Basically the catch block is compiled the same as a nested function (with visibility of local variables in the enclosing function), and added to a list of handlers. When an exception is encountered the handler is searched for and run WITHOUT unwinding the stack first. If the exception can be recovered from then the handler does that and just returns. If it's not recoverable then a non-local GOTO is done to the code after the catch block -- using setjmp/longjmp or a similar mechanism.
gcc supports nested functions so you could do the same thing in slightly enhanced C if you wanted to do the bookkeeping manually.
Hmmh, I have no firsthand experience, but I thought PL/I's exception handling would be even more like Common Lisp in that for function not defining a handler, but called by those which did, *that* handler would be used (the dynamic scoping part). I don't think that is easily replicable with longjmps or nested functions.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
But back to the original question: will PL/I make sense to a programmer (most) familiar with C?
PL/I is a bit of an oddball in that it supports recoverable exceptions (I only know of Common Lisp which offers the same) for which some support of dynamic scoping (like old LISPs) is needed. That'll be quite alien to a C programmer.
As does Object Pascal, any language which supports an eval() e.g. Perl, and for that matter I know of an ALGOL-60 implementation which goes some way towards having them.
I ... don't think you're understanding the concept.
Recoverable exceptions is extending that (hardware assisted) idea to exceptions thrown in a program. For example your program might be writing to a disk file when the disk fills up. An exception is thrown. The handler sees that there is a bunch of stuff in /tmp and deletes it (or compresses some log files or something). The handler returns to the point of the throw and the write to the file succeeds, without the user's program being aware that anything ever happened, or having to be coded specially in any way.
*The* algorithm was invented by Edsger Dijkstra and named the "shunting yard" algorithm because its operation resembles that of a railroad shunting yard.
I ... don't think you're understanding the concept.
Recoverable exceptions is extending that (hardware assisted) idea to exceptions thrown in a program. For example your program might be writing to a disk file when the disk fills up. An exception is thrown. The handler sees that there is a bunch of stuff in /tmp and deletes it (or compresses some log files or something). The handler returns to the point of the throw and the write to the file succeeds, without the user's program being aware that anything ever happened, or having to be coded specially in any way.
I understand the concept perfectly well. In what way does OP not do what you're describing?